From 6c8770e80d0339e037407cd59e3c41464f1ad385 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 29 Apr 2025 11:43:25 +1000 Subject: [PATCH 01/44] Change default state cache size back to 128 (#7364) Closes: - https://github.com/sigp/lighthouse/issues/7363 - Change default state cache size back to 128. - Make state pruning properly LRU rather than MSU after skipping the cull-exempt states. --- beacon_node/src/cli.rs | 2 +- beacon_node/store/src/state_cache.rs | 4 +- book/src/help_bn.md | 2 +- lighthouse/tests/beacon_node.rs | 2 +- scripts/tests/doppelganger_protection.sh | 66 +++++++++++++++--------- 5 files changed, 48 insertions(+), 28 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 5de096b25f..271c6d3b68 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -821,7 +821,7 @@ pub fn cli_app() -> Command { .long("state-cache-size") .value_name("STATE_CACHE_SIZE") .help("Specifies the size of the state cache") - .default_value("32") + .default_value("128") .action(ArgAction::Set) .display_order(0) ) diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 281ecab152..bc062a4e0c 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -245,7 +245,9 @@ impl StateCache { let mut old_boundary_state_roots = vec![]; let mut good_boundary_state_roots = vec![]; - for (&state_root, (_, state)) in self.states.iter().skip(cull_exempt) { + // Skip the `cull_exempt` most-recently used, then reverse the iterator to start at + // least-recently used states. 
+ for (&state_root, (_, state)) in self.states.iter().skip(cull_exempt).rev() { let is_advanced = state.slot() > state.latest_block_header().slot; let is_boundary = state.slot() % E::slots_per_epoch() == 0; let could_finalize = diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 1942f737df..04b40f224e 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -389,7 +389,7 @@ Options: Minimum number of states to cull from the state cache when it gets full [default: 1] --state-cache-size - Specifies the size of the state cache [default: 32] + Specifies the size of the state cache [default: 128] --suggested-fee-recipient Emergency fallback fee recipient for use in case the validator client does not have one configured. You should set this flag on the diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 86104ce050..ac94da3552 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1873,7 +1873,7 @@ fn block_cache_size_flag() { fn state_cache_size_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(32))); + .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(128))); } #[test] fn state_cache_size_flag() { diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 5be5c13dee..288f9d72d0 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -74,18 +74,27 @@ if [[ "$BEHAVIOR" == "failure" ]]; then vc_1_keys_artifact_id="1-lighthouse-geth-$vc_1_range_start-$vc_1_range_end" service_name=vc-1-doppelganger - kurtosis service add \ - --files /validator_keys:$vc_1_keys_artifact_id,/testnet:el_cl_genesis_data \ - $ENCLAVE_NAME $service_name $LH_IMAGE_NAME -- lighthouse \ - vc \ - --debug-level debug \ - --testnet-dir=/testnet \ - --validators-dir=/validator_keys/keys \ - 
--secrets-dir=/validator_keys/secrets \ - --init-slashing-protection \ - --beacon-nodes=http://$bn_2_url:$bn_2_port \ - --enable-doppelganger-protection \ - --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 + kurtosis service add $ENCLAVE_NAME $service_name --json-service-config - << EOF + { + "image": "$LH_IMAGE_NAME", + "files": { + "/validator_keys": ["$vc_1_keys_artifact_id"], + "/testnet": ["el_cl_genesis_data"] + }, + "cmd": [ + "lighthouse", + "vc", + "--debug-level", "info", + "--testnet-dir=/testnet", + "--validators-dir=/validator_keys/keys", + "--secrets-dir=/validator_keys/secrets", + "--init-slashing-protection", + "--beacon-nodes=http://$bn_2_url:$bn_2_port", + "--enable-doppelganger-protection", + "--suggested-fee-recipient", "0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990" + ] + } +EOF # Check if doppelganger VC has stopped and exited. Exit code 1 means the check timed out and VC is still running. check_exit_cmd="until [ \$(get_service_status $service_name) != 'RUNNING' ]; do sleep 1; done" @@ -110,18 +119,27 @@ if [[ "$BEHAVIOR" == "success" ]]; then vc_4_keys_artifact_id="4-lighthouse-geth-$vc_4_range_start-$vc_4_range_end" service_name=vc-4 - kurtosis service add \ - --files /validator_keys:$vc_4_keys_artifact_id,/testnet:el_cl_genesis_data \ - $ENCLAVE_NAME $service_name $LH_IMAGE_NAME -- lighthouse \ - vc \ - --debug-level debug \ - --testnet-dir=/testnet \ - --validators-dir=/validator_keys/keys \ - --secrets-dir=/validator_keys/secrets \ - --init-slashing-protection \ - --beacon-nodes=http://$bn_2_url:$bn_2_port \ - --enable-doppelganger-protection \ - --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 + kurtosis service add $ENCLAVE_NAME $service_name --json-service-config - << EOF + { + "image": "$LH_IMAGE_NAME", + "files": { + "/validator_keys": ["$vc_4_keys_artifact_id"], + "/testnet": ["el_cl_genesis_data"] + }, + "cmd": [ + "lighthouse", + "vc", + "--debug-level", "info", + "--testnet-dir=/testnet", + 
"--validators-dir=/validator_keys/keys", + "--secrets-dir=/validator_keys/secrets", + "--init-slashing-protection", + "--beacon-nodes=http://$bn_2_url:$bn_2_port", + "--enable-doppelganger-protection", + "--suggested-fee-recipient", "0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990" + ] + } +EOF doppelganger_failure=0 From e42406d7b79a85ad4622f3a7440ff6468ac4c9e1 Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 29 Apr 2025 22:29:42 +1000 Subject: [PATCH 02/44] Release v7.0.1 (#7374) Bump all required version numbers to `v7.0.1` to prepare for the next release. --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e15ce9a58..2fbe9addb9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -860,7 +860,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.0.0" +version = "7.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -1108,7 +1108,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.0.0" +version = "7.0.1" dependencies = [ "beacon_node", "bytes", @@ -4813,7 +4813,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.0.0" +version = "7.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -5368,7 +5368,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "7.0.0" +version = "7.0.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 79dad05886..1d4ae3aa27 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.0.0" +version = "7.0.1" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 
919e3976e7..16023a1d8b 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.0.0-", - fallback = "Lighthouse/v7.0.0" + prefix = "Lighthouse/v7.0.1-", + fallback = "Lighthouse/v7.0.1" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.0.0" + "7.0.1" } /// Returns the name of the current client running. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 639f2130a4..a543719d25 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.0.0" +version = "7.0.1" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 50a80cbbe3..c423ba0722 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.0.0" +version = "7.0.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From b1138c28fb940b49f52bb627c37b891adaab2fd8 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 15 May 2025 15:54:24 +1000 Subject: [PATCH 03/44] Add additional mergify rules to automate triaging (#7451) * Add additional mergify rules to automate triaging. * Update mergify config. --- .github/mergify.yml | 52 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/.github/mergify.yml b/.github/mergify.yml index 73267904b8..a314ec3e98 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -12,6 +12,58 @@ pull_request_rules: comment: message: This pull request has merge conflicts. Could you please resolve them @{{author}}? 
🙏 + label: + add: + - waiting-on-author + remove: + - ready-for-review + + - name: Ask to resolve CI failures + conditions: + - or: + - check-failure=test-suite-success + - check-skipped=test-suite-success + - check-failure=local-testnet-success + - check-skipped=local-testnet-success + actions: + comment: + message: Some required checks have failed. Could you please take a look @{{author}}? 🙏 + label: + add: + - waiting-on-author + remove: + - ready-for-review + + - name: Update labels when PR is unblocked + conditions: + - label=waiting-on-author + - -conflict + - check-failure!=test-suite-success + - check-failure!=local-testnet-success + - "#review-requested > 0" + actions: + label: + remove: + - waiting-on-author + add: + - ready-for-review + comment: + message: > + All required checks have passed and there are no merge conflicts. + This pull request may now be ready for another review. + + - name: Close stale pull request after 30 days of inactivity + conditions: + - label=waiting-on-author + - updated-at<=30 days ago + actions: + close: + message: > + Hi @{{author}}, this pull request has been closed automatically due to 30 days of inactivity. + If you’d like to continue working on it, feel free to reopen at any time. + label: + add: + - stale - name: Approve trivial maintainer PRs conditions: From cc6ae9d3f09c412a97e887a87a00a9877282f294 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 15 May 2025 16:54:56 +1000 Subject: [PATCH 04/44] Fix mergify infinite loop. (#7463) * Fix mergify infinite loop. * Update rule for `ready-for-review` label. 
* More fix to prevent infinite loop --- .github/mergify.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/mergify.yml b/.github/mergify.yml index a314ec3e98..718c8ba3fe 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -1,6 +1,7 @@ pull_request_rules: - name: Ask to resolve conflict conditions: + - -closed - conflict - -author=dependabot[bot] - or: @@ -20,6 +21,7 @@ pull_request_rules: - name: Ask to resolve CI failures conditions: + - -closed - or: - check-failure=test-suite-success - check-skipped=test-suite-success @@ -36,10 +38,15 @@ pull_request_rules: - name: Update labels when PR is unblocked conditions: + - -closed + - -draft - label=waiting-on-author - -conflict + # Need to be the logical opposite of the above rule `Ask to resolve CI failures`, otherwise mergify will run into an infinite loop. - check-failure!=test-suite-success + - check-skipped!=test-suite-success - check-failure!=local-testnet-success + - check-skipped!=local-testnet-success - "#review-requested > 0" actions: label: @@ -54,6 +61,7 @@ pull_request_rules: - name: Close stale pull request after 30 days of inactivity conditions: + - -closed - label=waiting-on-author - updated-at<=30 days ago actions: From e0ee148d6acac639d2b847f86018ae1d482837ab Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 16 May 2025 11:46:21 +1000 Subject: [PATCH 05/44] Prevent mergify from updating labels while CI is still running. 
(#7470) --- .github/mergify.yml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 718c8ba3fe..7b8a138f57 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -23,10 +23,10 @@ pull_request_rules: conditions: - -closed - or: - - check-failure=test-suite-success - check-skipped=test-suite-success - - check-failure=local-testnet-success - check-skipped=local-testnet-success + - check-failure=test-suite-success + - check-failure=local-testnet-success actions: comment: message: Some required checks have failed. Could you please take a look @{{author}}? 🙏 @@ -42,11 +42,10 @@ pull_request_rules: - -draft - label=waiting-on-author - -conflict - # Need to be the logical opposite of the above rule `Ask to resolve CI failures`, otherwise mergify will run into an infinite loop. - - check-failure!=test-suite-success - - check-skipped!=test-suite-success - - check-failure!=local-testnet-success - - check-skipped!=local-testnet-success + # Unfortunately, it doesn't look like there's an easy way to check for PRs pending + # CI workflows approvals. + - check-success=local-testnet-success + - check-success=local-testnet-success - "#review-requested > 0" actions: label: From e21198c08baaa796ad0fa57b589be7632116c1af Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 16 May 2025 13:55:06 +1000 Subject: [PATCH 06/44] One more attempt to fix mergify condition. (#7472) --- .github/mergify.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 7b8a138f57..48d76219e3 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -44,7 +44,7 @@ pull_request_rules: - -conflict # Unfortunately, it doesn't look like there's an easy way to check for PRs pending # CI workflows approvals. 
- - check-success=local-testnet-success + - check-success=test-suite-success - check-success=local-testnet-success - "#review-requested > 0" actions: From 7759cb8f91c01a7d335469d317b5787368656064 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 21 May 2025 18:18:48 +1000 Subject: [PATCH 07/44] Update mergify rule to not evaluate PRs that are not ready for review - to reduce noise and avoid updating stale PRs. (#7494) --- .github/mergify.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/mergify.yml b/.github/mergify.yml index 48d76219e3..f4d1520826 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -4,6 +4,7 @@ pull_request_rules: - -closed - conflict - -author=dependabot[bot] + - label=ready-for-review - or: - -draft # Don't report conflicts on regular draft. - and: # Do report conflicts on draft that are scheduled for the next major release. @@ -22,6 +23,7 @@ pull_request_rules: - name: Ask to resolve CI failures conditions: - -closed + - label=ready-for-review - or: - check-skipped=test-suite-success - check-skipped=local-testnet-success From 8dde5bdb4413f5f1faf3203bf405a563f5449600 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 27 May 2025 23:03:22 +1000 Subject: [PATCH 08/44] Update mergify rules so that I can add `waiting-on-author` on a PR that's passing CI. Remove noisy comments. --- .github/mergify.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index f4d1520826..a84b51b32e 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -48,17 +48,17 @@ pull_request_rules: # CI workflows approvals. - check-success=test-suite-success - check-success=local-testnet-success - - "#review-requested > 0" + # Update the label only if there are no more change requests from any reviewers and no unresolved threads. + # This rule ensures that a PR with passing CI can be marked as `waiting-on-author`. 
+ - or: + - "#changes-requested-reviews-by = 0" + - "#review-threads-unresolved = 0" actions: label: remove: - waiting-on-author add: - ready-for-review - comment: - message: > - All required checks have passed and there are no merge conflicts. - This pull request may now be ready for another review. - name: Close stale pull request after 30 days of inactivity conditions: From b7fc03437bbab2fe6455c0065d2fbc0467b437d1 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 28 May 2025 10:55:14 +1000 Subject: [PATCH 09/44] Fix condition --- .github/mergify.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index a84b51b32e..6caf2c4bf0 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -50,7 +50,7 @@ pull_request_rules: - check-success=local-testnet-success # Update the label only if there are no more change requests from any reviewers and no unresolved threads. # This rule ensures that a PR with passing CI can be marked as `waiting-on-author`. - - or: + - and: - "#changes-requested-reviews-by = 0" - "#review-threads-unresolved = 0" actions: From 9e9c51be6fef85c4995f032294025b3c55133d6c Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 28 May 2025 10:59:17 +1000 Subject: [PATCH 10/44] Remove redundant `and` --- .github/mergify.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 6caf2c4bf0..4ab73bcf07 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -50,9 +50,8 @@ pull_request_rules: - check-success=local-testnet-success # Update the label only if there are no more change requests from any reviewers and no unresolved threads. # This rule ensures that a PR with passing CI can be marked as `waiting-on-author`. 
- - and: - - "#changes-requested-reviews-by = 0" - - "#review-threads-unresolved = 0" + - "#changes-requested-reviews-by = 0" + - "#review-threads-unresolved = 0" actions: label: remove: From 257d2707182c2c1d073c53811b38e9944e07d932 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Tue, 1 Jul 2025 11:07:49 +0800 Subject: [PATCH 11/44] Add voluntary exit via validator manager (#6612) * #4303 * #4804 -Add voluntary exit feature to the validator manager -Add delete all validators by using the keyword "all" --- Cargo.lock | 3 + book/src/help_vm.md | 4 + book/src/validator_manager.md | 2 +- book/src/validator_manager_api.md | 77 +++ book/src/validator_voluntary_exit.md | 2 + lighthouse/tests/validator_manager.rs | 112 ++++ validator_client/http_api/src/test_utils.rs | 25 +- validator_manager/Cargo.toml | 3 + validator_manager/src/delete_validators.rs | 24 +- validator_manager/src/exit_validators.rs | 585 ++++++++++++++++++++ validator_manager/src/import_validators.rs | 6 +- validator_manager/src/lib.rs | 7 +- validator_manager/src/list_validators.rs | 154 +++++- wordlist.txt | 2 + 14 files changed, 983 insertions(+), 23 deletions(-) create mode 100644 validator_manager/src/exit_validators.rs diff --git a/Cargo.lock b/Cargo.lock index 7d77ce4044..5e22c9742a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9933,6 +9933,7 @@ name = "validator_manager" version = "0.1.0" dependencies = [ "account_utils", + "beacon_chain", "clap", "clap_utils", "derivative", @@ -9942,9 +9943,11 @@ dependencies = [ "eth2_wallet", "ethereum_serde_utils", "hex", + "http_api", "regex", "serde", "serde_json", + "slot_clock", "tempfile", "tokio", "tree_hash", diff --git a/book/src/help_vm.md b/book/src/help_vm.md index 8ff54122ef..f58537ae1c 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -28,6 +28,10 @@ Commands: delete Deletes one or more validators from a validator client using the HTTP API. 
+ exit + Exits one or more validators using the HTTP API. It can also be used + to generate a presigned voluntary exit message for a particular future + epoch. help Print this message or the help of the given subcommand(s) diff --git a/book/src/validator_manager.md b/book/src/validator_manager.md index b0190c1812..609f176901 100644 --- a/book/src/validator_manager.md +++ b/book/src/validator_manager.md @@ -32,4 +32,4 @@ The `validator-manager` boasts the following features: - [Creating and importing validators using the `create` and `import` commands.](./validator_manager_create.md) - [Moving validators between two VCs using the `move` command.](./validator_manager_move.md) -- [Managing validators such as delete, import and list validators.](./validator_manager_api.md) +- [Managing validators such as exit, delete, import and list validators.](./validator_manager_api.md) diff --git a/book/src/validator_manager_api.md b/book/src/validator_manager_api.md index 7bc5be8557..0542008463 100644 --- a/book/src/validator_manager_api.md +++ b/book/src/validator_manager_api.md @@ -2,6 +2,54 @@ The `lighthouse validator-manager` uses the [Keymanager API](https://ethereum.github.io/keymanager-APIs/#/) to list, import and delete keystores via the HTTP API. This requires the validator client running with the flag `--http`. By default, the validator client HTTP address is `http://localhost:5062`. If a different IP address or port is used, add the flag `--vc-url http://IP:port_number` to the command below. +## Exit + +The `exit` command exits one or more validators from the validator client. To `exit`: + +> **Important note: Once the --beacon-node flag is used, it will publish the voluntary exit to the network. 
This action is irreversible.** + +```bash +lighthouse vm exit --vc-token --validators pubkey1,pubkey2 --beacon-node http://beacon-node-url:5052 +``` + +Example: + +```bash +lighthouse vm exit --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators 0x8885c29b8f88ee9b9a37b480fd4384fed74bda33d85bc8171a904847e65688b6c9bb4362d6597fd30109fb2def6c3ae4,0xa262dae3dcd2b2e280af534effa16bedb27c06f2959e114d53bd2a248ca324a018dc73179899a066149471a94a1bc92f --beacon-node http://localhost:5052 +``` + +If successful, the following log will be returned: + +```text +Successfully validated and published voluntary exit for validator 0x8885c29b8f88ee9b9a37b480fd4384fed74bda33d85bc8171a904847e65688b6c9bb4362d6597fd30109fb2def6c3ae4 +Successfully validated and published voluntary exit for validator +0xa262dae3dcd2b2e280af534effa16bedb27c06f2959e114d53bd2a248ca324a018dc73179899a066149471a94a1bc92f +``` + +To exit all validators on the validator client, use the keyword `all`: + +```bash +lighthouse vm exit --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators all --beacon-node http://localhost:5052 +``` + +To check the voluntary exit status, refer to [the list command](./validator_manager_api.md#list). + +The following command will only generate a presigned voluntary exit message and save it to a file named `{validator_pubkey}.json`. It **will not** publish the voluntary exit to the network. + +To generate a presigned exit message and save it to a file, use the flag `--presign`: + +```bash +lighthouse vm exit --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators all --presign +``` + +To generate a presigned exit message for a particular (future) epoch, use the flag `--exit-epoch`: + +```bash +lighthouse vm exit --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators all --presign --exit-epoch 1234567 +``` + +The generated presigned exit message will only be valid at or after the specified exit-epoch, in this case, epoch 1234567. 
+ ## Delete The `delete` command deletes one or more validators from the validator client. It will also modify the `validator_definitions.yml` file automatically so there is no manual action required from the user after the delete. To `delete`: @@ -16,6 +64,12 @@ Example: lighthouse vm delete --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators 0x8885c29b8f88ee9b9a37b480fd4384fed74bda33d85bc8171a904847e65688b6c9bb4362d6597fd30109fb2def6c3ae4,0xa262dae3dcd2b2e280af534effa16bedb27c06f2959e114d53bd2a248ca324a018dc73179899a066149471a94a1bc92f ``` +To delete all validators on the validator client, use the keyword `all`: + +```bash +lighthouse vm delete --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators all +``` + ## Import The `import` command imports validator keystores generated by the `ethstaker-deposit-cli`. To import a validator keystore: @@ -37,3 +91,26 @@ To list the validators running on the validator client: ```bash lighthouse vm list --vc-token ~/.lighthouse/mainnet/validators/api-token.txt ``` + +The `list` command can also be used to check the voluntary exit status of validators. To do so, use both `--beacon-node` and `--validators` flags. The `--validators` flag accepts a comma-separated list of validator public keys, or the keyword `all` to check the voluntary exit status of all validators attached to the validator client. + +```bash +lighthouse vm list --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators 0x8de7ec501d574152f52a962bf588573df2fc3563fd0c6077651208ed20f24f3d8572425706b343117b48bdca56808416 --beacon-node http://localhost:5052 +``` + +If the validator voluntary exit has been accepted by the chain, the following log will be returned: + +```text +Voluntary exit for validator 0x8de7ec501d574152f52a962bf588573df2fc3563fd0c6077651208ed20f24f3d8572425706b343117b48bdca56808416 has been accepted into the beacon chain, but not yet finalized. Finalization may take several minutes or longer. 
Before finalization there is a low probability that the exit may be reverted. +Current epoch: 2, Exit epoch: 7, Withdrawable epoch: 263 +Please keep your validator running till exit epoch +Exit epoch in approximately 480 secs +``` + +When the exit epoch is reached, querying the status will return: + +```text +Validator 0x8de7ec501d574152f52a962bf588573df2fc3563fd0c6077651208ed20f24f3d8572425706b343117b48bdca56808416 has exited at epoch: 7 +``` + +You can safely shut down the validator client at this point. diff --git a/book/src/validator_voluntary_exit.md b/book/src/validator_voluntary_exit.md index 2a45852f32..ff404518b7 100644 --- a/book/src/validator_voluntary_exit.md +++ b/book/src/validator_voluntary_exit.md @@ -10,6 +10,8 @@ A validator can initiate a voluntary exit provided that the validator is current It takes at a minimum 5 epochs (32 minutes) for a validator to exit after initiating a voluntary exit. This number can be much higher depending on how many other validators are queued to exit. +You can also perform voluntary exit for one or more validators using the validator manager, see [Managing Validators](./validator_manager_api.md#exit) for more details. + ## Initiating a voluntary exit In order to initiate an exit, users can use the `lighthouse account validator exit` command. 
diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index 04e3eafe6e..5ee9b0263a 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -10,6 +10,7 @@ use types::*; use validator_manager::{ create_validators::CreateConfig, delete_validators::DeleteConfig, + exit_validators::ExitConfig, import_validators::ImportConfig, list_validators::ListConfig, move_validators::{MoveConfig, PasswordSource, Validators}, @@ -119,6 +120,12 @@ impl CommandLineTest { } } +impl CommandLineTest { + fn validators_exit() -> Self { + Self::default().flag("exit", None) + } +} + #[test] pub fn validator_create_without_output_path() { CommandLineTest::validators_create().assert_failed(); @@ -443,6 +450,8 @@ pub fn validator_list_defaults() { let expected = ListConfig { vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), vc_token_path: PathBuf::from("./token.json"), + beacon_url: None, + validators_to_display: vec![], }; assert_eq!(expected, config); }); @@ -468,3 +477,106 @@ pub fn validator_delete_defaults() { assert_eq!(expected, config); }); } + +#[test] +pub fn validator_delete_missing_validator_flag() { + CommandLineTest::validators_delete() + .flag("--vc-token", Some("./token.json")) + .assert_failed(); +} + +#[test] +pub fn validator_exit_defaults() { + CommandLineTest::validators_exit() + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--vc-token", Some("./token.json")) + .flag("--beacon-node", Some("http://localhost:5052")) + .assert_success(|config| { + let expected = ExitConfig { + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + validators_to_exit: vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap(), + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_1).unwrap(), + ], + beacon_url: Some(SensitiveUrl::parse("http://localhost:5052").unwrap()), + exit_epoch: None, + presign: 
false, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_exit_exit_epoch_and_presign_flags() { + CommandLineTest::validators_exit() + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--vc-token", Some("./token.json")) + .flag("--exit-epoch", Some("1234567")) + .flag("--presign", None) + .assert_success(|config| { + let expected = ExitConfig { + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + validators_to_exit: vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap(), + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_1).unwrap(), + ], + beacon_url: None, + exit_epoch: Some(Epoch::new(1234567)), + presign: true, + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_exit_missing_validator_flag() { + CommandLineTest::validators_exit() + .flag("--vc-token", Some("./token.json")) + .assert_failed(); +} + +#[test] +pub fn validator_exit_using_beacon_and_presign_flags() { + CommandLineTest::validators_exit() + .flag("--vc-token", Some("./token.json")) + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--beacon-node", Some("http://localhost:1001")) + .flag("--presign", None) + .assert_failed(); +} + +#[test] +pub fn validator_exit_using_beacon_and_exit_epoch_flags() { + CommandLineTest::validators_exit() + .flag("--vc-token", Some("./token.json")) + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--beacon-node", Some("http://localhost:1001")) + .flag("--exit-epoch", Some("1234567")) + .assert_failed(); +} + +#[test] +pub fn validator_exit_exit_epoch_flag_without_presign_flag() { + CommandLineTest::validators_exit() + .flag("--vc-token", Some("./token.json")) + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--exit-epoch", Some("1234567")) + .assert_failed(); +} 
diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 8c23f79fd3..feb71c3a46 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -26,6 +26,7 @@ use std::time::Duration; use task_executor::test_utils::TestRuntime; use tempfile::{tempdir, TempDir}; use tokio::sync::oneshot; +use types::ChainSpec; use validator_services::block_service::BlockService; use zeroize::Zeroizing; @@ -61,6 +62,7 @@ pub struct ApiTester { pub _server_shutdown: oneshot::Sender<()>, pub validator_dir: TempDir, pub secrets_dir: TempDir, + pub spec: Arc, } impl ApiTester { @@ -69,6 +71,19 @@ impl ApiTester { } pub async fn new_with_http_config(http_config: HttpConfig) -> Self { + let slot_clock = + TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let genesis_validators_root = Hash256::repeat_byte(42); + let spec = Arc::new(E::default_spec()); + Self::new_with_options(http_config, slot_clock, genesis_validators_root, spec).await + } + + pub async fn new_with_options( + http_config: HttpConfig, + slot_clock: TestingSlotClock, + genesis_validators_root: Hash256, + spec: Arc, + ) -> Self { let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); let token_path = tempdir().unwrap().path().join(PK_FILENAME); @@ -91,20 +106,15 @@ impl ApiTester { ..Default::default() }; - let spec = Arc::new(E::default_spec()); - let slashing_db_path = validator_dir.path().join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); - let slot_clock = - TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); - let test_runtime = TestRuntime::default(); let validator_store = Arc::new(LighthouseValidatorStore::new( initialized_validators, slashing_protection, - Hash256::repeat_byte(42), + genesis_validators_root, spec.clone(), 
Some(Arc::new(DoppelgangerService::default())), slot_clock.clone(), @@ -127,7 +137,7 @@ impl ApiTester { validator_store: Some(validator_store.clone()), graffiti_file: None, graffiti_flag: Some(Graffiti::default()), - spec, + spec: spec.clone(), config: http_config, sse_logging_components: None, slot_clock, @@ -161,6 +171,7 @@ impl ApiTester { _server_shutdown: shutdown_tx, validator_dir, secrets_dir, + spec, } } diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 7cb05616f4..9192f0e86b 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -17,12 +17,15 @@ ethereum_serde_utils = { workspace = true } hex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +slot_clock = { workspace = true } tokio = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } zeroize = { workspace = true } [dev-dependencies] +beacon_chain = { workspace = true } +http_api = { workspace = true } regex = { workspace = true } tempfile = { workspace = true } validator_http_api = { workspace = true } diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs index 5ef647c5af..cb0557427c 100644 --- a/validator_manager/src/delete_validators.rs +++ b/validator_manager/src/delete_validators.rs @@ -45,7 +45,10 @@ pub fn cli_app() -> Command { Arg::new(VALIDATOR_FLAG) .long(VALIDATOR_FLAG) .value_name("STRING") - .help("Comma-separated list of validators (pubkey) that will be deleted.") + .help( + "Comma-separated list of validators (pubkey) that will be deleted. 
\ + To delete all validators, use the keyword \"all\".", + ) .action(ArgAction::Set) .required(true) .display_order(0), @@ -64,10 +67,14 @@ impl DeleteConfig { let validators_to_delete_str = clap_utils::parse_required::(matches, VALIDATOR_FLAG)?; - let validators_to_delete = validators_to_delete_str - .split(',') - .map(|s| s.trim().parse()) - .collect::, _>>()?; + let validators_to_delete = if validators_to_delete_str.trim() == "all" { + Vec::new() + } else { + validators_to_delete_str + .split(',') + .map(|s| s.trim().parse()) + .collect::, _>>()? + }; Ok(Self { vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, @@ -90,11 +97,16 @@ async fn run(config: DeleteConfig) -> Result<(), String> { let DeleteConfig { vc_url, vc_token_path, - validators_to_delete, + mut validators_to_delete, } = config; let (http_client, validators) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + // Delete all validators on the VC + if validators_to_delete.is_empty() { + validators_to_delete = validators.iter().map(|v| v.validating_pubkey).collect(); + } + for validator_to_delete in &validators_to_delete { if !validators .iter() diff --git a/validator_manager/src/exit_validators.rs b/validator_manager/src/exit_validators.rs new file mode 100644 index 0000000000..30d8c5c47d --- /dev/null +++ b/validator_manager/src/exit_validators.rs @@ -0,0 +1,585 @@ +use crate::{common::vc_http_client, DumpConfig}; + +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; +use eth2::types::{ConfigAndPreset, Epoch, StateId, ValidatorId, ValidatorStatus}; +use eth2::{BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use serde::{Deserialize, Serialize}; +use serde_json; +use slot_clock::{SlotClock, SystemTimeSlotClock}; +use std::fs::write; +use std::path::PathBuf; +use std::time::Duration; +use types::{ChainSpec, EthSpec, PublicKeyBytes}; + +pub const CMD: &str = "exit"; +pub const BEACON_URL_FLAG: &str = "beacon-node"; +pub const VC_URL_FLAG: &str = 
"vc-url"; +pub const VC_TOKEN_FLAG: &str = "vc-token"; +pub const VALIDATOR_FLAG: &str = "validators"; +pub const EXIT_EPOCH_FLAG: &str = "exit-epoch"; +pub const PRESIGN_FLAG: &str = "presign"; + +pub fn cli_app() -> Command { + Command::new(CMD) + .about( + "Exits one or more validators using the HTTP API. It can \ + also be used to generate a presigned voluntary exit message for a particular future epoch.", + ) + .arg( + Arg::new(BEACON_URL_FLAG) + .long(BEACON_URL_FLAG) + .value_name("NETWORK_ADDRESS") + .help("Address to a beacon node HTTP API") + .action(ArgAction::Set) + .display_order(0) + .conflicts_with(PRESIGN_FLAG), + ) + .arg( + Arg::new(VC_URL_FLAG) + .long(VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help("A HTTP(S) address of a validator client using the keymanager-API.") + .default_value("http://localhost:5062") + .requires(VC_TOKEN_FLAG) + .action(ArgAction::Set) + .display_order(0), + ) + .arg( + Arg::new(VC_TOKEN_FLAG) + .long(VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the validator client.") + .action(ArgAction::Set) + .display_order(0), + ) + .arg( + Arg::new(VALIDATOR_FLAG) + .long(VALIDATOR_FLAG) + .value_name("STRING") + .help( + "Comma-separated list of validators (pubkey) to exit. \ + To exit all validators, use the keyword \"all\".", + ) + .action(ArgAction::Set) + .required(true) + .display_order(0), + ) + .arg( + Arg::new(EXIT_EPOCH_FLAG) + .long(EXIT_EPOCH_FLAG) + .value_name("EPOCH") + .help( + "Provide the minimum epoch for processing voluntary exit. \ + This flag is required to be used in combination with `--presign` to \ + save the voluntary exit presign to a file for future use.", + ) + .action(ArgAction::Set) + .display_order(0) + .requires(PRESIGN_FLAG) + .conflicts_with(BEACON_URL_FLAG), + ) + .arg( + Arg::new(PRESIGN_FLAG) + .long(PRESIGN_FLAG) + .help( + "Generate the voluntary exit presign and save it to a file \ + named {validator_pubkey}.json. 
Note: Using this without the \ + `--beacon-node` flag will not publish the voluntary exit to the network.", + ) + .help_heading(FLAG_HEADER) + .action(ArgAction::SetTrue) + .display_order(0) + .conflicts_with(BEACON_URL_FLAG), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct ExitConfig { + pub vc_url: SensitiveUrl, + pub vc_token_path: PathBuf, + pub validators_to_exit: Vec, + pub beacon_url: Option, + pub exit_epoch: Option, + pub presign: bool, +} + +impl ExitConfig { + fn from_cli(matches: &ArgMatches) -> Result { + let validators_to_exit_str = clap_utils::parse_required::(matches, VALIDATOR_FLAG)?; + + // Keyword "all" to exit all validators, vector to be created later + let validators_to_exit = if validators_to_exit_str.trim() == "all" { + Vec::new() + } else { + validators_to_exit_str + .split(',') + .map(|s| s.trim().parse()) + .collect::, _>>()? + }; + + Ok(Self { + vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, + validators_to_exit, + beacon_url: clap_utils::parse_optional(matches, BEACON_URL_FLAG)?, + exit_epoch: clap_utils::parse_optional(matches, EXIT_EPOCH_FLAG)?, + presign: matches.get_flag(PRESIGN_FLAG), + }) + } +} + +pub async fn cli_run( + matches: &ArgMatches, + dump_config: DumpConfig, +) -> Result<(), String> { + let config = ExitConfig::from_cli(matches)?; + + if dump_config.should_exit_early(&config)? 
{ + Ok(()) + } else { + run::(config).await + } +} + +async fn run(config: ExitConfig) -> Result<(), String> { + let ExitConfig { + vc_url, + vc_token_path, + mut validators_to_exit, + beacon_url, + exit_epoch, + presign, + } = config; + + let (http_client, validators) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + + if validators_to_exit.is_empty() { + validators_to_exit = validators.iter().map(|v| v.validating_pubkey).collect(); + } + + for validator_to_exit in validators_to_exit { + // Check that the validators_to_exit is in the validator client + if !validators + .iter() + .any(|validator| validator.validating_pubkey == validator_to_exit) + { + return Err(format!("Validator {} doesn't exist", validator_to_exit)); + } + + let exit_message = http_client + .post_validator_voluntary_exit(&validator_to_exit, exit_epoch) + .await + .map_err(|e| format!("Failed to generate voluntary exit message: {}", e))?; + + if presign { + let exit_message_json = serde_json::to_string(&exit_message.data); + + match exit_message_json { + Ok(json) => { + // Save the exit message to JSON file(s) + let file_path = format!("{}.json", validator_to_exit); + write(&file_path, json).map_err(|e| { + format!("Failed to write voluntary exit message to file: {}", e) + })?; + println!("Voluntary exit message saved to {}", file_path); + } + Err(e) => eprintln!("Failed to serialize voluntary exit message: {}", e), + } + } + + // Only publish the voluntary exit if the --beacon-node flag is present + if let Some(ref beacon_url) = beacon_url { + let beacon_node = BeaconNodeHttpClient::new( + SensitiveUrl::parse(beacon_url.as_ref()) + .map_err(|e| format!("Failed to parse beacon http server: {:?}", e))?, + Timeouts::set_all(Duration::from_secs(12)), + ); + + if beacon_node + .get_node_syncing() + .await + .map_err(|e| format!("Failed to get beacon node sync status: {:?}", e))? 
+ .data + .is_syncing + { + return Err( + "Beacon node is syncing, submit the voluntary exit later when beacon node is synced" + .to_string(), + ); + } + + let genesis_data = beacon_node + .get_beacon_genesis() + .await + .map_err(|e| format!("Failed to get genesis data: {}", e))? + .data; + + let config_and_preset = beacon_node + .get_config_spec::() + .await + .map_err(|e| format!("Failed to get config spec: {}", e))? + .data; + + let spec = ChainSpec::from_config::(config_and_preset.config()) + .ok_or("Failed to create chain spec")?; + + let validator_data = beacon_node + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(validator_to_exit), + ) + .await + .map_err(|e| format!("Failed to get validator details: {:?}", e))? + .ok_or_else(|| { + format!( + "Validator {} is not present in the beacon state. \ + Please ensure that your beacon node is synced \ + and the validator has been deposited.", + validator_to_exit + ) + })? + .data; + + let activation_epoch = validator_data.validator.activation_epoch; + let current_epoch = get_current_epoch::(genesis_data.genesis_time, &spec) + .ok_or("Failed to get current epoch. Please check your system time")?; + + // Check if validator is eligible for exit + if validator_data.status == ValidatorStatus::ActiveOngoing + && current_epoch < activation_epoch + spec.shard_committee_period + { + eprintln!( + "Validator {} is not eligible for exit. It will become eligible at epoch {}", + validator_to_exit, + activation_epoch + spec.shard_committee_period + ) + } else if validator_data.status != ValidatorStatus::ActiveOngoing { + eprintln!( + "Validator {} is not eligible for exit. 
Validator status is: {:?}", + validator_to_exit, validator_data.status + ) + } else { + // Only publish voluntary exit if validator status is ActiveOngoing + beacon_node + .post_beacon_pool_voluntary_exits(&exit_message.data) + .await + .map_err(|e| format!("Failed to publish voluntary exit: {}", e))?; + eprintln!( + "Successfully validated and published voluntary exit for validator {}", + validator_to_exit + ); + } + } + } + + Ok(()) +} + +pub fn get_current_epoch(genesis_time: u64, spec: &ChainSpec) -> Option { + let slot_clock = SystemTimeSlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_time), + Duration::from_secs(spec.seconds_per_slot), + ); + slot_clock.now().map(|s| s.epoch(E::slots_per_epoch())) +} + +#[cfg(not(debug_assertions))] +#[cfg(test)] +mod test { + use super::*; + use crate::{ + common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, + }; + use account_utils::eth2_keystore::KeystoreBuilder; + use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; + use eth2::lighthouse_vc::types::KeystoreJsonStr; + use http_api::test_utils::InteractiveTester; + use std::{ + fs::{self, File}, + io::Write, + sync::Arc, + }; + use types::{ChainSpec, MainnetEthSpec}; + use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; + use zeroize::Zeroizing; + type E = MainnetEthSpec; + + struct TestBuilder { + exit_config: Option, + src_import_builder: Option, + http_config: HttpConfig, + vc_token: Option, + validators: Vec, + beacon_node: InteractiveTester, + index_of_validators_to_exit: Vec, + spec: Arc, + } + + impl TestBuilder { + async fn new() -> Self { + let mut spec = ChainSpec::mainnet(); + spec.shard_committee_period = 1; + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(1)); + spec.capella_fork_epoch = Some(Epoch::new(2)); + spec.deneb_fork_epoch = Some(Epoch::new(3)); + + let beacon_node = InteractiveTester::new(Some(spec.clone()), 
64).await; + + let harness = &beacon_node.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + let execution_ctx = mock_el.server.ctx.clone(); + + // Move to terminal block. + mock_el.server.all_payloads_valid(); + execution_ctx + .execution_block_generator + .write() + .move_to_terminal_block() + .unwrap(); + + Self { + exit_config: None, + src_import_builder: None, + http_config: ApiTester::default_http_config(), + vc_token: None, + validators: vec![], + beacon_node, + index_of_validators_to_exit: vec![], + spec: spec.into(), + } + } + + async fn with_validators(mut self, index_of_validators_to_exit: Vec) -> Self { + // Ensure genesis validators root matches the beacon node. + let genesis_validators_root = self + .beacon_node + .harness + .get_current_state() + .genesis_validators_root(); + // And use a single slot clock and same spec for BN and VC to keep things simple. + let slot_clock = self.beacon_node.harness.chain.slot_clock.clone(); + let vc = ApiTester::new_with_options( + self.http_config.clone(), + slot_clock, + genesis_validators_root, + self.spec.clone(), + ) + .await; + let mut builder = ImportTestBuilder::new_with_vc(vc).await; + + self.vc_token = + Some(fs::read_to_string(builder.get_import_config().vc_token_path).unwrap()); + + let local_validators: Vec = index_of_validators_to_exit + .iter() + .map(|&index| { + let keystore = KeystoreBuilder::new( + &self.beacon_node.harness.validator_keypairs[index], + "password".as_bytes(), + "".into(), + ) + .unwrap() + .build() + .unwrap(); + + ValidatorSpecification { + voting_keystore: KeystoreJsonStr(keystore), + voting_keystore_password: Zeroizing::new("password".into()), + slashing_protection: None, + fee_recipient: None, + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + enabled: Some(true), + } + }) + .collect(); + + let beacon_url = SensitiveUrl::parse(self.beacon_node.client.as_ref()).unwrap(); + + let 
validators_to_exit = index_of_validators_to_exit + .iter() + .map(|&index| { + self.beacon_node.harness.validator_keypairs[index] + .pk + .clone() + .into() + }) + .collect(); + + let import_config = builder.get_import_config(); + + let validators_dir = import_config.vc_token_path.parent().unwrap(); + let validators_file = validators_dir.join("validators.json"); + + builder = builder.mutate_import_config(|config| { + config.validators_file_path = Some(validators_file.clone()); + }); + + fs::write( + &validators_file, + serde_json::to_string(&local_validators).unwrap(), + ) + .unwrap(); + + self.exit_config = Some(ExitConfig { + vc_url: import_config.vc_url, + vc_token_path: import_config.vc_token_path, + validators_to_exit, + beacon_url: Some(beacon_url), + exit_epoch: None, + presign: false, + }); + + self.validators = local_validators.clone(); + self.src_import_builder = Some(builder); + self.index_of_validators_to_exit = index_of_validators_to_exit; + self + } + + pub async fn run_test(self) -> TestResult { + let import_builder = self.src_import_builder.unwrap(); + let initialized_validators = import_builder.vc.initialized_validators.clone(); + let import_test_result = import_builder.run_test().await; + assert!(import_test_result.result.is_ok()); + + // only assign the validator index after validator is imported to the VC + for &index in &self.index_of_validators_to_exit { + initialized_validators.write().set_index( + &self.beacon_node.harness.validator_keypairs[index] + .pk + .compress(), + index as u64, + ); + } + + let path = self.exit_config.clone().unwrap().vc_token_path; + let parent = path.parent().unwrap(); + + fs::create_dir_all(parent).expect("Was not able to create parent directory"); + + File::options() + .write(true) + .read(true) + .create(true) + .truncate(true) + .open(path.clone()) + .unwrap() + .write_all(self.vc_token.clone().unwrap().as_bytes()) + .unwrap(); + + // Advance beacon chain + self.beacon_node.harness.advance_slot(); + + 
self.beacon_node + .harness + .extend_chain( + 100, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let result = run::(self.exit_config.clone().unwrap()).await; + + self.beacon_node.harness.advance_slot(); + + self.beacon_node + .harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let validator_data = self + .index_of_validators_to_exit + .iter() + .map(|&index| { + self.beacon_node + .harness + .get_current_state() + .get_validator(index) + .unwrap() + .clone() + }) + .collect::>(); + + let validator_exit_epoch = validator_data + .iter() + .map(|validator| validator.exit_epoch) + .collect::>(); + + let validator_withdrawable_epoch = validator_data + .iter() + .map(|validator| validator.withdrawable_epoch) + .collect::>(); + + let current_epoch = self.beacon_node.harness.get_current_state().current_epoch(); + let max_seed_lookahead = self.beacon_node.harness.spec.max_seed_lookahead; + let min_withdrawability_delay = self + .beacon_node + .harness + .spec + .min_validator_withdrawability_delay; + + // As per the spec: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#compute_activation_exit_epoch + let beacon_exit_epoch = current_epoch + 1 + max_seed_lookahead; + let beacon_withdrawable_epoch = beacon_exit_epoch + min_withdrawability_delay; + + assert!(validator_exit_epoch + .iter() + .all(|&epoch| epoch == beacon_exit_epoch)); + + assert!(validator_withdrawable_epoch + .iter() + .all(|&epoch| epoch == beacon_withdrawable_epoch)); + + if result.is_ok() { + return TestResult { result: Ok(()) }; + } + + TestResult { + result: Err(result.unwrap_err()), + } + } + } + + #[must_use] + struct TestResult { + result: Result<(), String>, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + } + #[tokio::test] + async fn exit_single_validator() { + TestBuilder::new() + .await + 
.with_validators(vec![0]) + .await + .run_test() + .await + .assert_ok(); + } + + #[tokio::test] + async fn exit_multiple_validators() { + TestBuilder::new() + .await + .with_validators(vec![10, 20, 30]) + .await + .run_test() + .await + .assert_ok(); + } +} diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 6cfbf7b54e..e5047f3f37 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -404,8 +404,12 @@ pub mod tests { } pub async fn new_with_http_config(http_config: HttpConfig) -> Self { - let dir = tempdir().unwrap(); let vc = ApiTester::new_with_http_config(http_config).await; + Self::new_with_vc(vc).await + } + + pub async fn new_with_vc(vc: ApiTester) -> Self { + let dir = tempdir().unwrap(); let vc_token_path = dir.path().join(VC_TOKEN_FILE_NAME); fs::write(&vc_token_path, &vc.api_token).unwrap(); diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs index 9beccd3bde..fb74779304 100644 --- a/validator_manager/src/lib.rs +++ b/validator_manager/src/lib.rs @@ -9,6 +9,7 @@ use types::EthSpec; pub mod common; pub mod create_validators; pub mod delete_validators; +pub mod exit_validators; pub mod import_validators; pub mod list_validators; pub mod move_validators; @@ -51,6 +52,7 @@ pub fn cli_app() -> Command { .subcommand(move_validators::cli_app()) .subcommand(list_validators::cli_app()) .subcommand(delete_validators::cli_app()) + .subcommand(exit_validators::cli_app()) } /// Run the account manager, returning an error if the operation did not succeed. 
@@ -79,11 +81,14 @@ pub fn run(matches: &ArgMatches, env: Environment) -> Result<(), move_validators::cli_run(matches, dump_config).await } Some((list_validators::CMD, matches)) => { - list_validators::cli_run(matches, dump_config).await + list_validators::cli_run::(matches, dump_config).await } Some((delete_validators::CMD, matches)) => { delete_validators::cli_run(matches, dump_config).await } + Some((exit_validators::CMD, matches)) => { + exit_validators::cli_run::(matches, dump_config).await + } Some(("", _)) => Err("No command supplied. See --help.".to_string()), Some((unknown, _)) => Err(format!( "{} is not a valid {} command. See --help.", diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs index a0a1c5fb40..6016b89eea 100644 --- a/validator_manager/src/list_validators.rs +++ b/validator_manager/src/list_validators.rs @@ -1,14 +1,20 @@ use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::lighthouse_vc::types::SingleKeystoreResponse; -use eth2::SensitiveUrl; +use eth2::types::{ConfigAndPreset, StateId, ValidatorId, ValidatorStatus}; +use eth2::{BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use serde::{Deserialize, Serialize}; use std::path::PathBuf; +use std::time::Duration; +use types::{ChainSpec, EthSpec, PublicKeyBytes}; +use crate::exit_validators::get_current_epoch; use crate::{common::vc_http_client, DumpConfig}; pub const CMD: &str = "list"; pub const VC_URL_FLAG: &str = "vc-url"; pub const VC_TOKEN_FLAG: &str = "vc-token"; +pub const BEACON_URL_FLAG: &str = "beacon-node"; +pub const VALIDATOR_FLAG: &str = "validators"; pub fn cli_app() -> Command { Command::new(CMD) @@ -31,47 +37,177 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0), ) + .arg( + Arg::new(BEACON_URL_FLAG) + .long(BEACON_URL_FLAG) + .value_name("NETWORK_ADDRESS") + .help( + "Address to a beacon node HTTP API. When supplied, \ + the status of validators (with regard to voluntary exit) \ + will be displayed. 
This flag is to be used together with \ + the --validators flag.", + ) + .action(ArgAction::Set) + .display_order(0) + .requires(VALIDATOR_FLAG), + ) + .arg( + Arg::new(VALIDATOR_FLAG) + .long(VALIDATOR_FLAG) + .value_name("STRING") + .help( + "Comma-separated list of validators (pubkey) to display status for. \ + To display the status for all validators, use the keyword \"all\". \ + This flag is to be used together with the --beacon-node flag.", + ) + .action(ArgAction::Set) + .display_order(0) + .requires(BEACON_URL_FLAG), + ) } #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] pub struct ListConfig { pub vc_url: SensitiveUrl, pub vc_token_path: PathBuf, + pub beacon_url: Option, + pub validators_to_display: Vec, } impl ListConfig { fn from_cli(matches: &ArgMatches) -> Result { + let validators_to_display_str = + clap_utils::parse_optional::(matches, VALIDATOR_FLAG)?; + + // Keyword "all" to list all validators, vector to be created later + let validators_to_display = match validators_to_display_str { + Some(str) => { + if str.trim() == "all" { + Vec::new() + } else { + str.split(',') + .map(|s| s.trim().parse()) + .collect::, _>>()? + } + } + None => Vec::new(), + }; + Ok(Self { vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + beacon_url: clap_utils::parse_optional(matches, BEACON_URL_FLAG)?, + validators_to_display, }) } } -pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { +pub async fn cli_run( + matches: &ArgMatches, + dump_config: DumpConfig, +) -> Result<(), String> { let config = ListConfig::from_cli(matches)?; if dump_config.should_exit_early(&config)? 
{ Ok(()) } else { - run(config).await?; + run::(config).await?; Ok(()) } } -async fn run(config: ListConfig) -> Result, String> { +async fn run(config: ListConfig) -> Result, String> { let ListConfig { vc_url, vc_token_path, + beacon_url, + mut validators_to_display, } = config; let (_, validators) = vc_http_client(vc_url.clone(), &vc_token_path).await?; println!("List of validators ({}):", validators.len()); - for validator in &validators { - println!("{}", validator.validating_pubkey); + if validators_to_display.is_empty() { + validators_to_display = validators.iter().map(|v| v.validating_pubkey).collect(); } + if let Some(ref beacon_url) = beacon_url { + for validator in &validators_to_display { + let beacon_node = BeaconNodeHttpClient::new( + SensitiveUrl::parse(beacon_url.as_ref()) + .map_err(|e| format!("Failed to parse beacon http server: {:?}", e))?, + Timeouts::set_all(Duration::from_secs(12)), + ); + + let validator_data = beacon_node + .get_beacon_states_validator_id(StateId::Head, &ValidatorId::PublicKey(*validator)) + .await + .map_err(|e| format!("Failed to get updated validator details: {:?}", e))? + .ok_or_else(|| { + format!("Validator {} is not present in the beacon state", validator) + })? + .data; + + match validator_data.status { + ValidatorStatus::ActiveExiting => { + let exit_epoch = validator_data.validator.exit_epoch; + let withdrawal_epoch = validator_data.validator.withdrawable_epoch; + + let genesis_data = beacon_node + .get_beacon_genesis() + .await + .map_err(|e| format!("Failed to get genesis data: {}", e))? + .data; + + let config_and_preset = beacon_node + .get_config_spec::() + .await + .map_err(|e| format!("Failed to get config spec: {}", e))? + .data; + + let spec = ChainSpec::from_config::(config_and_preset.config()) + .ok_or("Failed to create chain spec")?; + + let current_epoch = get_current_epoch::(genesis_data.genesis_time, &spec) + .ok_or("Failed to get current epoch. 
Please check your system time")?; + + eprintln!( + "Voluntary exit for validator {} has been accepted into the beacon chain. \ + Note that the voluntary exit is subject chain finalization. \ + Before the chain has finalized, there is a low \ + probability that the exit may be reverted.", + validator + ); + eprintln!( + "Current epoch: {}, Exit epoch: {}, Withdrawable epoch: {}", + current_epoch, exit_epoch, withdrawal_epoch + ); + eprintln!("Please keep your validator running till exit epoch"); + eprintln!( + "Exit epoch in approximately {} secs", + (exit_epoch - current_epoch) * spec.seconds_per_slot * E::slots_per_epoch() + ); + } + ValidatorStatus::ExitedSlashed | ValidatorStatus::ExitedUnslashed => { + eprintln!( + "Validator {} has exited at epoch: {}", + validator, validator_data.validator.exit_epoch + ); + } + _ => { + eprintln!( + "Validator {} has not initiated voluntary exit or the voluntary exit \ + is yet to be accepted into the beacon chain. Validator status is: {}", + validator, validator_data.status + ) + } + } + } + } else { + for validator in &validators { + println!("{}", validator.validating_pubkey); + } + } Ok(validators) } @@ -87,7 +223,9 @@ mod test { use crate::{ common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, }; + use types::MainnetEthSpec; use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; + type E = MainnetEthSpec; struct TestBuilder { list_config: Option, @@ -116,6 +254,8 @@ mod test { self.list_config = Some(ListConfig { vc_url: builder.get_import_config().vc_url, vc_token_path: builder.get_import_config().vc_token_path, + beacon_url: None, + validators_to_display: vec![], }); self.vc_token = @@ -152,7 +292,7 @@ mod test { .write_all(self.vc_token.clone().unwrap().as_bytes()) .unwrap(); - let result = run(self.list_config.clone().unwrap()).await; + let result = run::(self.list_config.clone().unwrap()).await; if result.is_ok() { let result_ref = result.as_ref().unwrap(); diff 
--git a/wordlist.txt b/wordlist.txt index 3c7070c642..ada0384d36 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -196,6 +196,8 @@ pem performant pid pre +presign +presigned pubkey pubkeys rc From e305cb1b921f544dfd05f769aeb7e37cd482933f Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 30 Jun 2025 23:06:37 -0700 Subject: [PATCH 12/44] Custody persist fix (#7661) N/A Persist the epoch -> cgc values. This is to ensure that `ValidatorRegistrations::latest_validator_custody_requirement` always returns a `Some` value post restart assuming the `epoch_validator_custody_requirements` map has been updated in the previous runs. --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 + .../beacon_chain/src/persisted_custody.rs | 2 +- beacon_node/beacon_chain/src/schema_change.rs | 9 ++ .../src/schema_change/migration_schema_v26.rs | 91 +++++++++++++++++++ .../beacon_chain/src/validator_custody.rs | 20 +++- .../beacon_chain/tests/schema_stability.rs | 15 +-- beacon_node/store/src/metadata.rs | 2 +- 7 files changed, 131 insertions(+), 12 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index de377dab97..65318835cc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -654,6 +654,10 @@ impl BeaconChain { /// Persists the custody information to disk. 
pub fn persist_custody_context(&self) -> Result<(), Error> { + if !self.spec.is_peer_das_scheduled() { + return Ok(()); + } + let custody_context: CustodyContextSsz = self .data_availability_checker .custody_context() diff --git a/beacon_node/beacon_chain/src/persisted_custody.rs b/beacon_node/beacon_chain/src/persisted_custody.rs index 6ede473b36..b685ea36b7 100644 --- a/beacon_node/beacon_chain/src/persisted_custody.rs +++ b/beacon_node/beacon_chain/src/persisted_custody.rs @@ -7,7 +7,7 @@ use types::{EthSpec, Hash256}; /// 32-byte key for accessing the `CustodyContext`. All zero because `CustodyContext` has its own column. pub const CUSTODY_DB_KEY: Hash256 = Hash256::ZERO; -pub struct PersistedCustody(CustodyContextSsz); +pub struct PersistedCustody(pub CustodyContextSsz); pub fn load_custody_context, Cold: ItemStore>( store: Arc>, diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 0abb48494a..317b89cbdd 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -2,6 +2,7 @@ mod migration_schema_v23; mod migration_schema_v24; mod migration_schema_v25; +mod migration_schema_v26; use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; @@ -58,6 +59,14 @@ pub fn migrate_schema( let ops = migration_schema_v25::downgrade_from_v25()?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(25), SchemaVersion(26)) => { + let ops = migration_schema_v26::upgrade_to_v26::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(26), SchemaVersion(25)) => { + let ops = migration_schema_v26::downgrade_from_v26::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs new file mode 100644 index 0000000000..2e2a6bdc4f --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs @@ -0,0 +1,91 @@ +use crate::persisted_custody::{PersistedCustody, CUSTODY_DB_KEY}; +use crate::validator_custody::CustodyContextSsz; +use crate::BeaconChainTypes; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::sync::Arc; +use store::{DBColumn, Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use tracing::info; + +#[derive(Debug, Encode, Decode, Clone)] +pub(crate) struct CustodyContextSszV24 { + pub(crate) validator_custody_at_head: u64, + pub(crate) persisted_is_supernode: bool, +} + +pub(crate) struct PersistedCustodyV24(CustodyContextSszV24); + +impl StoreItem for PersistedCustodyV24 { + fn db_column() -> DBColumn { + DBColumn::CustodyContext + } + + fn as_store_bytes(&self) -> Vec { + self.0.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + let custody_context = CustodyContextSszV24::from_ssz_bytes(bytes)?; + Ok(PersistedCustodyV24(custody_context)) + } +} + +/// Upgrade the `CustodyContext` entry to v26. 
+pub fn upgrade_to_v26( + db: Arc>, +) -> Result, Error> { + let ops = if db.spec.is_peer_das_scheduled() { + match db.get_item::(&CUSTODY_DB_KEY) { + Ok(Some(PersistedCustodyV24(ssz_v24))) => { + info!("Migrating `CustodyContext` to v26 schema"); + let custody_context_v2 = CustodyContextSsz { + validator_custody_at_head: ssz_v24.validator_custody_at_head, + persisted_is_supernode: ssz_v24.persisted_is_supernode, + epoch_validator_custody_requirements: vec![], + }; + vec![KeyValueStoreOp::PutKeyValue( + DBColumn::CustodyContext, + CUSTODY_DB_KEY.as_slice().to_vec(), + PersistedCustody(custody_context_v2).as_store_bytes(), + )] + } + _ => { + vec![] + } + } + } else { + // Delete it from db if PeerDAS hasn't been scheduled + vec![KeyValueStoreOp::DeleteKey( + DBColumn::CustodyContext, + CUSTODY_DB_KEY.as_slice().to_vec(), + )] + }; + + Ok(ops) +} + +pub fn downgrade_from_v26( + db: Arc>, +) -> Result, Error> { + let res = db.get_item::(&CUSTODY_DB_KEY); + let ops = match res { + Ok(Some(PersistedCustody(ssz_v26))) => { + info!("Migrating `CustodyContext` back from v26 schema"); + let custody_context_v24 = CustodyContextSszV24 { + validator_custody_at_head: ssz_v26.validator_custody_at_head, + persisted_is_supernode: ssz_v26.persisted_is_supernode, + }; + vec![KeyValueStoreOp::PutKeyValue( + DBColumn::CustodyContext, + CUSTODY_DB_KEY.as_slice().to_vec(), + PersistedCustodyV24(custody_context_v24).as_store_bytes(), + )] + } + _ => { + // no op if it's not on the db, as previous versions gracefully handle data missing from disk. 
+ vec![] + } + }; + + Ok(ops) +} diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs index 1169b64537..5f037fabf3 100644 --- a/beacon_node/beacon_chain/src/validator_custody.rs +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -163,7 +163,13 @@ impl CustodyContext { validator_custody_count: AtomicU64::new(ssz_context.validator_custody_at_head), current_is_supernode: is_supernode, persisted_is_supernode: ssz_context.persisted_is_supernode, - validator_registrations: Default::default(), + validator_registrations: RwLock::new(ValidatorRegistrations { + validators: Default::default(), + epoch_validator_custody_requirements: ssz_context + .epoch_validator_custody_requirements + .into_iter() + .collect(), + }), } } @@ -263,8 +269,9 @@ pub struct CustodyCountChanged { /// The custody information that gets persisted across runs. #[derive(Debug, Encode, Decode, Clone)] pub struct CustodyContextSsz { - validator_custody_at_head: u64, - persisted_is_supernode: bool, + pub validator_custody_at_head: u64, + pub persisted_is_supernode: bool, + pub epoch_validator_custody_requirements: Vec<(Epoch, u64)>, } impl From<&CustodyContext> for CustodyContextSsz { @@ -272,6 +279,13 @@ impl From<&CustodyContext> for CustodyContextSsz { CustodyContextSsz { validator_custody_at_head: context.validator_custody_count.load(Ordering::Relaxed), persisted_is_supernode: context.persisted_is_supernode, + epoch_validator_custody_requirements: context + .validator_registrations + .read() + .epoch_validator_custody_requirements + .iter() + .map(|(epoch, count)| (*epoch, *count)) + .collect(), } } } diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index 00d75a554d..fc37a1159b 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -88,7 +88,7 @@ async fn schema_stability() { check_db_columns(); 
check_metadata_sizes(&store); check_op_pool(&store); - check_custody_context(&store); + check_custody_context(&store, &harness.spec); check_persisted_chain(&store); // Not covered here: @@ -134,12 +134,13 @@ fn check_op_pool(store: &Store) { assert_eq!(op_pool.as_store_bytes().len(), 28); } -fn check_custody_context(store: &Store) { - let custody_context = store - .get_item::(&Hash256::ZERO) - .unwrap() - .unwrap(); - assert_eq!(custody_context.as_store_bytes().len(), 9); +fn check_custody_context(store: &Store, spec: &ChainSpec) { + let custody_context_opt = store.get_item::(&Hash256::ZERO).unwrap(); + if spec.is_peer_das_scheduled() { + assert_eq!(custody_context_opt.unwrap().as_store_bytes().len(), 13); + } else { + assert!(custody_context_opt.is_none()); + } } fn check_persisted_chain(store: &Store) { diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 63cb4661cd..39a46451fc 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(25); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(26); // All the keys that get stored under the `BeaconMeta` column. // From 41742ce2bde924e4dc6684b430815ca1895ae225 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 2 Jul 2025 10:08:40 +1000 Subject: [PATCH 13/44] Update `SAMPLES_PER_SLOT` to be number of custody groups instead of data columns (#7683) Update `SAMPLES_PER_SLOT` to be number of custody groups instead of data columns. This should have no impact on the current implementation as config currently maintains a `group:subnet:column` ratio of `1:1:1`. 
**In short, this PR doesn't change anything for Fusaka, but ensures compliance with the spec and potential future changes.** I've added separate methods to compute sampling columns and custody groups for clarity: `spec.sampling_size_columns` and `spec.sampling_size_custody_groups`. See the clarifications in this PR for more details: https://github.com/ethereum/consensus-specs/pull/4251 --- .../overflow_lru_cache.rs | 21 +++++---- beacon_node/beacon_chain/src/test_utils.rs | 2 +- .../beacon_chain/src/validator_custody.rs | 45 ++++++++++++++----- .../lighthouse_network/src/types/globals.rs | 18 +++++--- consensus/types/src/chain_spec.rs | 15 +++++-- 5 files changed, 71 insertions(+), 30 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index deaea3eb24..3c1fd1e7bc 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -481,7 +481,8 @@ impl DataAvailabilityCheckerInner { if let Some(available_block) = pending_components.make_available( &self.spec, - self.custody_context.sampling_size(Some(epoch), &self.spec), + self.custody_context + .num_of_data_columns_to_sample(Some(epoch), &self.spec), |block| self.state_cache.recover_pending_executed_block(block), )? { // We keep the pending components in the availability cache during block import (#5845). @@ -526,7 +527,9 @@ impl DataAvailabilityCheckerInner { // Merge in the data columns. pending_components.merge_data_columns(kzg_verified_data_columns)?; - let num_expected_columns = self.custody_context.sampling_size(Some(epoch), &self.spec); + let num_expected_columns = self + .custody_context + .num_of_data_columns_to_sample(Some(epoch), &self.spec); debug!( component = "data_columns", ?block_root, @@ -622,7 +625,9 @@ impl DataAvailabilityCheckerInner { // Merge in the block.
pending_components.merge_block(diet_executed_block); - let num_expected_columns = self.custody_context.sampling_size(Some(epoch), &self.spec); + let num_expected_columns = self + .custody_context + .num_of_data_columns_to_sample(Some(epoch), &self.spec); debug!( component = "block", ?block_root, @@ -631,11 +636,11 @@ impl DataAvailabilityCheckerInner { ); // Check if we have all components and entire set is consistent. - if let Some(available_block) = pending_components.make_available( - &self.spec, - self.custody_context.sampling_size(Some(epoch), &self.spec), - |block| self.state_cache.recover_pending_executed_block(block), - )? { + if let Some(available_block) = + pending_components.make_available(&self.spec, num_expected_columns, |block| { + self.state_cache.recover_pending_executed_block(block) + })? + { // We keep the pending components in the availability cache during block import (#5845). write_lock.put(block_root, pending_components); drop(write_lock); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index db4e2fab26..2c4981078d 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -777,7 +777,7 @@ where self.chain .data_availability_checker .custody_context() - .sampling_size(None, &self.chain.spec) as usize + .num_of_data_columns_to_sample(None, &self.chain.spec) as usize } pub fn slots_per_epoch(&self) -> u64 { diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs index 5f037fabf3..7dc5b18ae4 100644 --- a/beacon_node/beacon_chain/src/validator_custody.rs +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -215,7 +215,8 @@ impl CustodyContext { ); return Some(CustodyCountChanged { new_custody_group_count: updated_cgc, - sampling_count: self.sampling_size(Some(effective_epoch), spec), + sampling_count: self + .num_of_custody_groups_to_sample(Some(effective_epoch), spec), }); } } @@ 
-240,9 +241,13 @@ impl CustodyContext { } } - /// Returns the count of custody columns this node must sample for a block at `epoch` to import. - /// If an `epoch` is not specified, returns the *current* validator custody requirement. - pub fn sampling_size(&self, epoch_opt: Option, spec: &ChainSpec) -> u64 { + /// This function is used to determine the custody group count at a given epoch. + /// + /// This differs from the number of custody groups sampled per slot, as the spec requires a + /// minimum sampling size which may exceed the custody group count (CGC). + /// + /// See also: [`Self::num_of_custody_groups_to_sample`]. + fn custody_group_count_at_epoch(&self, epoch_opt: Option, spec: &ChainSpec) -> u64 { let custody_group_count = if self.current_is_supernode { spec.number_of_custody_groups } else if let Some(epoch) = epoch_opt { @@ -253,8 +258,26 @@ impl CustodyContext { } else { self.custody_group_count_at_head(spec) }; + custody_group_count + } - spec.sampling_size(custody_group_count) + /// Returns the count of custody groups this node must _sample_ for a block at `epoch` to import. + /// If an `epoch` is not specified, returns the *current* validator custody requirement. + pub fn num_of_custody_groups_to_sample( + &self, + epoch_opt: Option, + spec: &ChainSpec, + ) -> u64 { + let custody_group_count = self.custody_group_count_at_epoch(epoch_opt, spec); + spec.sampling_size_custody_groups(custody_group_count) + .expect("should compute node sampling size from valid chain spec") + } + + /// Returns the count of columns this node must _sample_ for a block at `epoch` to import. + /// If an `epoch` is not specified, returns the *current* validator custody requirement. 
+ pub fn num_of_data_columns_to_sample(&self, epoch_opt: Option, spec: &ChainSpec) -> u64 { + let custody_group_count = self.custody_group_count_at_epoch(epoch_opt, spec); + spec.sampling_size_columns(custody_group_count) .expect("should compute node sampling size from valid chain spec") } } @@ -307,7 +330,7 @@ mod tests { spec.number_of_custody_groups ); assert_eq!( - custody_context.sampling_size(None, &spec), + custody_context.num_of_custody_groups_to_sample(None, &spec), spec.number_of_custody_groups ); } @@ -322,7 +345,7 @@ mod tests { "head custody count should be minimum spec custody requirement" ); assert_eq!( - custody_context.sampling_size(None, &spec), + custody_context.num_of_custody_groups_to_sample(None, &spec), spec.samples_per_slot ); } @@ -412,7 +435,7 @@ mod tests { register_validators_and_assert_cgc(&custody_context, validators_and_expected_cgc, &spec); assert_eq!( - custody_context.sampling_size(None, &spec), + custody_context.num_of_custody_groups_to_sample(None, &spec), spec.number_of_custody_groups ); } @@ -423,7 +446,7 @@ mod tests { let spec = E::default_spec(); let current_slot = Slot::new(10); let current_epoch = current_slot.epoch(E::slots_per_epoch()); - let default_sampling_size = custody_context.sampling_size(None, &spec); + let default_sampling_size = custody_context.num_of_custody_groups_to_sample(None, &spec); let validator_custody_units = 10; let _cgc_changed = custody_context.register_validators::( @@ -437,12 +460,12 @@ mod tests { // CGC update is not applied for `current_epoch`. assert_eq!( - custody_context.sampling_size(Some(current_epoch), &spec), + custody_context.num_of_custody_groups_to_sample(Some(current_epoch), &spec), default_sampling_size ); // CGC update is applied for the next epoch. 
assert_eq!( - custody_context.sampling_size(Some(current_epoch + 1), &spec), + custody_context.num_of_custody_groups_to_sample(Some(current_epoch + 1), &spec), validator_custody_units ); } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index d1ed1c33b0..cc4d758b4a 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -66,7 +66,7 @@ impl NetworkGlobals { // The below `expect` calls will panic on start up if the chain spec config values used // are invalid let sampling_size = spec - .sampling_size(custody_group_count) + .sampling_size_custody_groups(custody_group_count) .expect("should compute node sampling size from valid chain spec"); let custody_groups = get_custody_groups(node_id, sampling_size, &spec) .expect("should compute node custody groups"); @@ -114,7 +114,7 @@ impl NetworkGlobals { // are invalid let sampling_size = self .spec - .sampling_size(custody_group_count) + .sampling_size_custody_groups(custody_group_count) .expect("should compute node sampling size from valid chain spec"); let custody_groups = get_custody_groups(self.local_enr().node_id().raw(), sampling_size, &self.spec) @@ -298,7 +298,13 @@ mod test { spec.fulu_fork_epoch = Some(Epoch::new(0)); let custody_group_count = spec.number_of_custody_groups / 2; - let subnet_sampling_size = spec.sampling_size(custody_group_count).unwrap(); + let sampling_size_custody_groups = spec + .sampling_size_custody_groups(custody_group_count) + .unwrap(); + let expected_sampling_subnet_count = sampling_size_custody_groups + * spec.data_column_sidecar_subnet_count + / spec.number_of_custody_groups; + let metadata = get_metadata(custody_group_count); let config = Arc::new(NetworkConfig::default()); @@ -310,7 +316,7 @@ mod test { ); assert_eq!( globals.sampling_subnets.read().len(), - subnet_sampling_size as usize + expected_sampling_subnet_count as usize ); } @@ -321,7 
+327,7 @@ mod test { spec.fulu_fork_epoch = Some(Epoch::new(0)); let custody_group_count = spec.number_of_custody_groups / 2; - let subnet_sampling_size = spec.sampling_size(custody_group_count).unwrap(); + let expected_sampling_columns = spec.sampling_size_columns(custody_group_count).unwrap(); let metadata = get_metadata(custody_group_count); let config = Arc::new(NetworkConfig::default()); @@ -333,7 +339,7 @@ mod test { ); assert_eq!( globals.sampling_columns.read().len(), - subnet_sampling_size as usize + expected_sampling_columns as usize ); } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b4fd5afe87..38cd4b9217 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -720,17 +720,24 @@ impl ChainSpec { } /// Returns the number of column sidecars to sample per slot. - pub fn sampling_size(&self, custody_group_count: u64) -> Result { + pub fn sampling_size_columns(&self, custody_group_count: u64) -> Result { + let sampling_size_groups = self.sampling_size_custody_groups(custody_group_count)?; + let columns_per_custody_group = self .number_of_columns .safe_div(self.number_of_custody_groups) .map_err(|_| "number_of_custody_groups must be greater than 0")?; - let custody_column_count = columns_per_custody_group - .safe_mul(custody_group_count) + let sampling_size_columns = columns_per_custody_group + .safe_mul(sampling_size_groups) .map_err(|_| "Computing sampling size should not overflow")?; - Ok(std::cmp::max(custody_column_count, self.samples_per_slot)) + Ok(sampling_size_columns) + } + + /// Returns the number of custody groups to sample per slot. 
+ pub fn sampling_size_custody_groups(&self, custody_group_count: u64) -> Result { + Ok(std::cmp::max(custody_group_count, self.samples_per_slot)) } pub fn all_data_column_sidecar_subnets(&self) -> impl Iterator { From 69c9c7038af79e5f2c538c7d023fce85114673f2 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 1 Jul 2025 19:38:22 -0700 Subject: [PATCH 14/44] Use prepare_beacon_proposer endpoint for validator custody registration (#7681) N/A This PR switches to using `prepare_beacon_proposer` instead of `beacon_committee_subscriptions` endpoint to register validators with the custody context. We currently use the `beacon_committee_subscriptions` endpoint for registering validators in the custody context. Using the subscriptions endpoint has a few disadvantages: 1. The lighthouse VC tries to optimise the number of calls it makes to this endpoint to reduce the load on the subscriptions endpoint. So we would be getting a different subset of the total number of validators in each call. This will lead to a ramp up of the validator custody units instead of a one time bump. For e.g.
see these logs ``` Jun 30 22:36:05.012 DEBUG Validator count at head updated old_count: 0, new_count: 19 Jun 30 22:36:11.016 DEBUG Validator count at head updated old_count: 19, new_count: 24 Jun 30 22:36:17.017 DEBUG Validator count at head updated old_count: 24, new_count: 27 Jun 30 22:36:23.020 DEBUG Validator count at head updated old_count: 27, new_count: 32 Jun 30 22:36:29.016 DEBUG Validator count at head updated old_count: 32, new_count: 36 Jun 30 22:36:35.005 DEBUG Validator count at head updated old_count: 36, new_count: 42 Jun 30 22:36:41.014 DEBUG Validator count at head updated old_count: 42, new_count: 44 Jun 30 22:36:47.017 DEBUG Validator count at head updated old_count: 44, new_count: 46 Jun 30 22:36:53.007 DEBUG Validator count at head updated old_count: 46, new_count: 48 Jun 30 22:36:59.009 DEBUG Validator count at head updated old_count: 48, new_count: 49 Jun 30 22:37:05.014 DEBUG Validator count at head updated old_count: 49, new_count: 50 Jun 30 22:37:11.007 DEBUG Validator count at head updated old_count: 50, new_count: 53 Jun 30 22:37:17.007 DEBUG Validator count at head updated old_count: 53, new_count: 55 Jun 30 22:37:35.008 DEBUG Validator count at head updated old_count: 55, new_count: 58 Jun 30 22:37:41.007 DEBUG Validator count at head updated old_count: 58, new_count: 59 Jun 30 22:37:53.010 DEBUG Validator count at head updated old_count: 59, new_count: 60 Jun 30 22:38:05.013 DEBUG Validator count at head updated old_count: 60, new_count: 61 Jun 30 22:38:23.006 DEBUG Validator count at head updated old_count: 61, new_count: 62 Jun 30 22:38:29.009 DEBUG Validator count at head updated old_count: 62, new_count: 63 Jun 30 22:38:41.009 DEBUG Validator count at head updated old_count: 63, new_count: 64 ``` 2. Different VCs would probably have different behaviours in terms of sending subscriptions In contrast, the `prepare_beacon_proposer` endpoint usage would be more standard across different VCs without any filtering of validators. 
Not doing so could mean potentially missing proposals so VCs are incentivised to make this call on any change in the validators managed by them. Lighthouse calls this endpoint every slot. --- beacon_node/http_api/src/lib.rs | 76 ++++++++++++++++----------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index a627fb0353..c757ca035b 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3721,13 +3721,11 @@ pub fn serve( .and(warp::path::end()) .and(warp_utils::json::json()) .and(validator_subscription_tx_filter.clone()) - .and(network_tx_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( |committee_subscriptions: Vec, validator_subscription_tx: Sender, - network_tx: UnboundedSender>, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { @@ -3761,42 +3759,6 @@ pub fn serve( )); } - if chain.spec.is_peer_das_scheduled() { - let (finalized_beacon_state, _, _) = - StateId(CoreStateId::Finalized).state(&chain)?; - let validators_and_balances = committee_subscriptions - .iter() - .filter_map(|subscription| { - if let Ok(effective_balance) = finalized_beacon_state - .get_effective_balance(subscription.validator_index as usize) - { - Some((subscription.validator_index as usize, effective_balance)) - } else { - None - } - }) - .collect::>(); - - let current_slot = - chain.slot().map_err(warp_utils::reject::unhandled_error)?; - if let Some(cgc_change) = chain - .data_availability_checker - .custody_context() - .register_validators::( - validators_and_balances, - current_slot, - &chain.spec, - ) { - network_tx.send(NetworkMessage::CustodyCountChanged { - new_custody_group_count: cgc_change.new_custody_group_count, - sampling_count: cgc_change.sampling_count, - }).unwrap_or_else(|e| { - debug!(error = %e, "Could not send message to the network service. 
\ - Likely shutdown") - }); - } - } - Ok(()) }) }, @@ -3808,11 +3770,13 @@ pub fn serve( .and(warp::path("prepare_beacon_proposer")) .and(warp::path::end()) .and(not_while_syncing_filter.clone()) + .and(network_tx_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(warp_utils::json::json()) .then( |not_synced_filter: Result<(), Rejection>, + network_tx: UnboundedSender>, task_spawner: TaskSpawner, chain: Arc>, preparation_data: Vec| { @@ -3849,6 +3813,42 @@ pub fn serve( )) })?; + if chain.spec.is_peer_das_scheduled() { + let (finalized_beacon_state, _, _) = + StateId(CoreStateId::Finalized).state(&chain)?; + let validators_and_balances = preparation_data + .iter() + .filter_map(|preparation| { + if let Ok(effective_balance) = finalized_beacon_state + .get_effective_balance(preparation.validator_index as usize) + { + Some((preparation.validator_index as usize, effective_balance)) + } else { + None + } + }) + .collect::>(); + + let current_slot = + chain.slot().map_err(warp_utils::reject::unhandled_error)?; + if let Some(cgc_change) = chain + .data_availability_checker + .custody_context() + .register_validators::( + validators_and_balances, + current_slot, + &chain.spec, + ) { + network_tx.send(NetworkMessage::CustodyCountChanged { + new_custody_group_count: cgc_change.new_custody_group_count, + sampling_count: cgc_change.sampling_count, + }).unwrap_or_else(|e| { + debug!(error = %e, "Could not send message to the network service. \ + Likely shutdown") + }); + } + } + Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) }) }, From fcc602a7872a867c8b0c60401b6e8e397e4d4332 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 2 Jul 2025 12:38:25 +1000 Subject: [PATCH 15/44] Update fulu network configs and add `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` (#7646) - #6240 - Bring built-in network configs up to date with latest consensus-spec PeerDAS configs. 
- Add `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` and use it to determine data availability window after the Fulu fork. --- .../src/data_availability_checker.rs | 29 +++--- beacon_node/store/src/hot_cold_store.rs | 15 ++- .../holesky/config.yaml | 5 +- .../hoodi/config.yaml | 3 +- .../mainnet/config.yaml | 5 +- .../sepolia/config.yaml | 5 +- consensus/types/src/chain_spec.rs | 92 +++++++++++++++++++ 7 files changed, 126 insertions(+), 28 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 1bc95c22ac..5404718048 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -486,14 +486,9 @@ impl DataAvailabilityChecker { /// The epoch at which we require a data availability check in block processing. /// `None` if the `Deneb` fork is disabled. pub fn data_availability_boundary(&self) -> Option { - let fork_epoch = self.spec.deneb_fork_epoch?; - let current_slot = self.slot_clock.now()?; - Some(std::cmp::max( - fork_epoch, - current_slot - .epoch(T::EthSpec::slots_per_epoch()) - .saturating_sub(self.spec.min_epochs_for_blob_sidecars_requests), - )) + let current_epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch()); + self.spec + .min_epoch_data_availability_boundary(current_epoch) } /// Returns true if the given epoch lies within the da boundary and false otherwise. @@ -670,15 +665,17 @@ async fn availability_cache_maintenance_service( .fork_choice_read_lock() .finalized_checkpoint() .epoch; + + let Some(min_epochs_for_blobs) = chain + .spec + .min_epoch_data_availability_boundary(current_epoch) + else { + // Shutdown service if deneb fork epoch not set. Unreachable as the same check is performed above. 
+ break; + }; + // any data belonging to an epoch before this should be pruned - let cutoff_epoch = std::cmp::max( - finalized_epoch + 1, - std::cmp::max( - current_epoch - .saturating_sub(chain.spec.min_epochs_for_blob_sidecars_requests), - deneb_fork_epoch, - ), - ); + let cutoff_epoch = std::cmp::max(finalized_epoch + 1, min_epochs_for_blobs); if let Err(e) = overflow_cache.do_maintenance(cutoff_epoch) { error!(error = ?e,"Failed to maintain availability cache"); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index f5e44f7ac9..0c230494b8 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -3074,18 +3074,17 @@ impl, Cold: ItemStore> HotColdDB /// Try to prune blobs, approximating the current epoch from the split slot. pub fn try_prune_most_blobs(&self, force: bool) -> Result<(), Error> { - let Some(deneb_fork_epoch) = self.spec.deneb_fork_epoch else { - debug!("Deneb fork is disabled"); - return Ok(()); - }; // The current epoch is >= split_epoch + 2. It could be greater if the database is // configured to delay updating the split or finalization has ceased. In this instance we // choose to also delay the pruning of blobs (we never prune without finalization anyway). 
let min_current_epoch = self.get_split_slot().epoch(E::slots_per_epoch()) + 2; - let min_data_availability_boundary = std::cmp::max( - deneb_fork_epoch, - min_current_epoch.saturating_sub(self.spec.min_epochs_for_blob_sidecars_requests), - ); + let Some(min_data_availability_boundary) = self + .spec + .min_epoch_data_availability_boundary(min_current_epoch) + else { + debug!("Deneb fork is disabled"); + return Ok(()); + }; self.try_prune_blobs(force, min_data_availability_boundary) } diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 19a3f79cc0..76d8d482c2 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -141,6 +141,9 @@ MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 -MAX_BLOBS_PER_BLOCK_FULU: 12 +VALIDATOR_CUSTODY_REQUIREMENT: 8 +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml index 5cca1cd037..a1365e3464 100644 --- a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml @@ -156,7 +156,8 @@ DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 -MAX_BLOBS_PER_BLOCK_FULU: 12 +VALIDATOR_CUSTODY_REQUIREMENT: 8 +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 # EIP7732 diff --git 
a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 886e5d12ed..0b68a27f4d 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -156,6 +156,9 @@ MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 -MAX_BLOBS_PER_BLOCK_FULU: 12 +VALIDATOR_CUSTODY_REQUIREMENT: 8 +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 10be107263..ccd71cdce9 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -142,6 +142,9 @@ MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 -MAX_BLOBS_PER_BLOCK_FULU: 12 +VALIDATOR_CUSTODY_REQUIREMENT: 8 +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 38cd4b9217..631389ce43 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -246,6 +246,7 @@ pub struct ChainSpec { * Networking Fulu */ blob_schedule: BlobSchedule, + min_epochs_for_data_column_sidecars_requests: u64, /* * Networking Derived @@ -740,6 +741,20 @@ impl ChainSpec { Ok(std::cmp::max(custody_group_count, self.samples_per_slot)) } + /// Returns the min epoch for 
blob / data column sidecar requests based on the current epoch. + /// Switch to use the column sidecar config once the `blob_retention_epoch` has passed Fulu fork epoch. + pub fn min_epoch_data_availability_boundary(&self, current_epoch: Epoch) -> Option { + let fork_epoch = self.deneb_fork_epoch?; + let blob_retention_epoch = + current_epoch.saturating_sub(self.min_epochs_for_blob_sidecars_requests); + match self.fulu_fork_epoch { + Some(fulu_fork_epoch) if blob_retention_epoch > fulu_fork_epoch => Some( + current_epoch.saturating_sub(self.min_epochs_for_data_column_sidecars_requests), + ), + _ => Some(std::cmp::max(fork_epoch, blob_retention_epoch)), + } + } + pub fn all_data_column_sidecar_subnets(&self) -> impl Iterator { (0..self.data_column_sidecar_subnet_count).map(DataColumnSubnetId::new) } @@ -1027,6 +1042,8 @@ impl ChainSpec { * Networking Fulu specific */ blob_schedule: BlobSchedule::default(), + min_epochs_for_data_column_sidecars_requests: + default_min_epochs_for_data_column_sidecars_requests(), /* * Application specific @@ -1363,6 +1380,8 @@ impl ChainSpec { * Networking Fulu specific */ blob_schedule: BlobSchedule::default(), + min_epochs_for_data_column_sidecars_requests: + default_min_epochs_for_data_column_sidecars_requests(), /* * Application specific @@ -1661,6 +1680,9 @@ pub struct Config { #[serde(default = "default_balance_per_additional_custody_group")] #[serde(with = "serde_utils::quoted_u64")] balance_per_additional_custody_group: u64, + #[serde(default = "default_min_epochs_for_data_column_sidecars_requests")] + #[serde(with = "serde_utils::quoted_u64")] + min_epochs_for_data_column_sidecars_requests: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1834,6 +1856,10 @@ const fn default_balance_per_additional_custody_group() -> u64 { 32000000000 } +const fn default_min_epochs_for_data_column_sidecars_requests() -> u64 { + 4096 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = 
max_request_blocks as usize; RuntimeVariableList::::from_vec( @@ -2045,6 +2071,8 @@ impl Config { blob_schedule: spec.blob_schedule.clone(), validator_custody_requirement: spec.validator_custody_requirement, balance_per_additional_custody_group: spec.balance_per_additional_custody_group, + min_epochs_for_data_column_sidecars_requests: spec + .min_epochs_for_data_column_sidecars_requests, } } @@ -2126,6 +2154,7 @@ impl Config { ref blob_schedule, validator_custody_requirement, balance_per_additional_custody_group, + min_epochs_for_data_column_sidecars_requests, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -2212,6 +2241,7 @@ impl Config { blob_schedule: blob_schedule.clone(), validator_custody_requirement, balance_per_additional_custody_group, + min_epochs_for_data_column_sidecars_requests, ..chain_spec.clone() }) @@ -2350,6 +2380,7 @@ mod tests { mod yaml_tests { use super::*; use paste::paste; + use std::sync::Arc; use tempfile::NamedTempFile; #[test] @@ -2649,4 +2680,65 @@ mod yaml_tests { let _ = spec.max_message_size(); let _ = spec.max_compressed_len(); } + + #[test] + fn min_epochs_for_data_sidecar_requests_deneb() { + type E = MainnetEthSpec; + let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec())); + let blob_retention_epochs = spec.min_epochs_for_blob_sidecars_requests; + + // `min_epochs_for_data_sidecar_requests` cannot be earlier than Deneb fork epoch. 
+ assert_eq!( + spec.deneb_fork_epoch, + spec.min_epoch_data_availability_boundary(Epoch::new(blob_retention_epochs / 2)) + ); + + let current_epoch = Epoch::new(blob_retention_epochs * 2); + let expected_min_blob_epoch = current_epoch - blob_retention_epochs; + assert_eq!( + Some(expected_min_blob_epoch), + spec.min_epoch_data_availability_boundary(current_epoch) + ); + } + + #[test] + fn min_epochs_for_data_sidecar_requests_fulu() { + type E = MainnetEthSpec; + let spec = { + let mut spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + // 4096 * 2 = 8192 + spec.fulu_fork_epoch = Some(Epoch::new(spec.min_epochs_for_blob_sidecars_requests * 2)); + // set a different value for testing purpose, 4096 / 2 = 2048 + spec.min_epochs_for_data_column_sidecars_requests = + spec.min_epochs_for_blob_sidecars_requests / 2; + Arc::new(spec) + }; + let blob_retention_epochs = spec.min_epochs_for_blob_sidecars_requests; + let data_column_retention_epochs = spec.min_epochs_for_data_column_sidecars_requests; + + // `min_epochs_for_data_sidecar_requests` at fulu fork epoch still uses `min_epochs_for_blob_sidecars_requests` + let fulu_fork_epoch = spec.fulu_fork_epoch.unwrap(); + let expected_blob_retention_epoch = fulu_fork_epoch - blob_retention_epochs; + assert_eq!( + Some(expected_blob_retention_epoch), + spec.min_epoch_data_availability_boundary(fulu_fork_epoch) + ); + + // `min_epochs_for_data_sidecar_requests` at fulu fork epoch + min_epochs_for_blob_sidecars_request + let blob_retention_epoch_after_fulu = fulu_fork_epoch + blob_retention_epochs; + let expected_blob_retention_epoch = blob_retention_epoch_after_fulu - blob_retention_epochs; + assert_eq!( + Some(expected_blob_retention_epoch), + spec.min_epoch_data_availability_boundary(blob_retention_epoch_after_fulu) + ); + + // After the final blob retention epoch, `min_epochs_for_data_sidecar_requests` should be calculated + // using `min_epochs_for_data_column_sidecars_request` + let current_epoch = 
blob_retention_epoch_after_fulu + 1; + let expected_data_column_retention_epoch = current_epoch - data_column_retention_epochs; + assert_eq!( + Some(expected_data_column_retention_epoch), + spec.min_epoch_data_availability_boundary(current_epoch) + ); + } } From a459a9af98c9da7dbdf11e36ab2472a11cac4c52 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 2 Jul 2025 14:50:33 +1000 Subject: [PATCH 16/44] Fix and test checkpoint sync from genesis (#7689) Fix a bug involving checkpoint sync from genesis reported by Sunnyside labs. Ensure that the store's `anchor` is initialised prior to storing the genesis state. In the case of checkpoint sync from genesis, the genesis state will be in the _hot DB_, so we need the hot DB metadata to be initialised in order to store it. I've extended the existing checkpoint sync tests to cover this case as well. There are some subtleties around what the `state_upper_limit` should be set to in this case. I've opted to just enable state reconstruction from the start in the test so it gets set to 0, which results in an end state more consistent with the other test cases (full state reconstruction). This is required because we can't meaningfully do any state reconstruction when the split slot is 0 (there is no range of frozen slots to reconstruct). --- beacon_node/beacon_chain/src/builder.rs | 35 +-- beacon_node/beacon_chain/tests/store_tests.rs | 220 +++++++++++------- beacon_node/store/src/reconstruct.rs | 6 + 3 files changed, 158 insertions(+), 103 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ce4264d550..c46cc015c9 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -514,9 +514,26 @@ where "Storing split from weak subjectivity state" ); - // Set the store's split point *before* storing genesis so that genesis is stored - // immediately in the freezer DB. 
+ // Set the store's split point *before* storing genesis so that if the genesis state + // is prior to the split slot, it will immediately be stored in the freezer DB. store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root); + + // It is also possible for the checkpoint state to be equal to the genesis state, in which + // case it will be stored in the hot DB. In this case, we need to ensure the store's anchor + // is initialised prior to storing the state, as the anchor is required for working out + // hdiff storage strategies. + let retain_historic_states = self.chain_config.reconstruct_historic_states; + self.pending_io_batch.push( + store + .init_anchor_info( + weak_subj_block.parent_root(), + weak_subj_block.slot(), + weak_subj_slot, + retain_historic_states, + ) + .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, + ); + let (_, updated_builder) = self.set_genesis_state(genesis_state)?; self = updated_builder; @@ -541,20 +558,6 @@ where "Stored frozen block roots at skipped slots" ); - // Write the anchor to memory before calling `put_state` otherwise hot hdiff can't store - // states that do not align with the `start_slot` grid. - let retain_historic_states = self.chain_config.reconstruct_historic_states; - self.pending_io_batch.push( - store - .init_anchor_info( - weak_subj_block.parent_root(), - weak_subj_block.slot(), - weak_subj_slot, - retain_historic_states, - ) - .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, - ); - // Write the state, block and blobs non-atomically, it doesn't matter if they're forgotten // about on a crash restart. 
store diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 1be2879e1a..d1a53d9b66 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2281,6 +2281,19 @@ async fn weak_subjectivity_sync_skips_at_genesis() { weak_subjectivity_sync_test(slots, checkpoint_slot).await } +// Checkpoint sync from the genesis state. +// +// This is a regression test for a bug we had involving the storage of the genesis state in the hot +// DB. +#[tokio::test] +async fn weak_subjectivity_sync_from_genesis() { + let start_slot = 1; + let end_slot = E::slots_per_epoch() * 2; + let slots = (start_slot..end_slot).map(Slot::new).collect(); + let checkpoint_slot = Slot::new(0); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Build an initial chain on one harness, representing a synced node with full history. let num_final_blocks = E::slots_per_epoch() * 2; @@ -2367,7 +2380,15 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { ); slot_clock.set_slot(harness.get_current_slot().as_u64()); + let chain_config = ChainConfig { + // Set reconstruct_historic_states to true from the start in the genesis case. This makes + // some of the later checks more uniform across the genesis/non-genesis cases. 
+ reconstruct_historic_states: checkpoint_slot == 0, + ..ChainConfig::default() + }; + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) + .chain_config(chain_config) .store(store.clone()) .custom_spec(test_spec::().into()) .task_executor(harness.chain.task_executor.clone()) @@ -2381,7 +2402,6 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .store_migrator_config(MigratorConfig::default().blocking()) .slot_clock(slot_clock) .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) .event_handler(Some(ServerSentEventHandler::new_with_capacity(1))) .execution_layer(Some(mock.el)) .rng(Box::new(StdRng::seed_from_u64(42))) @@ -2449,96 +2469,118 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { assert_eq!(state.update_tree_hash_cache().unwrap(), state_root); } - // Forwards iterator from 0 should fail as we lack blocks. - assert!(matches!( - beacon_chain.forwards_iter_block_roots(Slot::new(0)), - Err(BeaconChainError::HistoricalBlockOutOfRange { .. }) - )); - - // Simulate processing of a `StatusMessage` with an older finalized epoch by calling - // `block_root_at_slot` with an old slot for which we don't know the block root. It should - // return `None` rather than erroring. - assert_eq!( - beacon_chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap(), - None - ); - - // Simulate querying the API for a historic state that is unknown. It should also return - // `None` rather than erroring. - assert_eq!(beacon_chain.state_root_at_slot(Slot::new(1)).unwrap(), None); - - // Supply blocks backwards to reach genesis. Omit the genesis block to check genesis handling. 
- let historical_blocks = chain_dump[..wss_block.slot().as_usize()] - .iter() - .filter(|s| s.beacon_block.slot() != 0) - .map(|s| s.beacon_block.clone()) - .collect::>(); - - let mut available_blocks = vec![]; - for blinded in historical_blocks { - let block_root = blinded.canonical_root(); - let full_block = harness - .chain - .get_block(&block_root) - .await - .expect("should get block") - .expect("should get block"); - - if let MaybeAvailableBlock::Available(block) = harness - .chain - .data_availability_checker - .verify_kzg_for_rpc_block( - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), - ) - .expect("should verify kzg") - { - available_blocks.push(block); - } + if checkpoint_slot != 0 { + // Forwards iterator from 0 should fail as we lack blocks (unless checkpoint slot is 0). + assert!(matches!( + beacon_chain.forwards_iter_block_roots(Slot::new(0)), + Err(BeaconChainError::HistoricalBlockOutOfRange { .. }) + )); + } else { + assert_eq!( + beacon_chain + .forwards_iter_block_roots(Slot::new(0)) + .unwrap() + .next() + .unwrap() + .unwrap(), + (wss_block_root, Slot::new(0)) + ); } - // Corrupt the signature on the 1st block to ensure that the backfill processor is checking - // signatures correctly. Regression test for https://github.com/sigp/lighthouse/pull/5120. - let mut batch_with_invalid_first_block = - available_blocks.iter().map(clone_block).collect::>(); - batch_with_invalid_first_block[0] = { - let (block_root, block, data) = clone_block(&available_blocks[0]).deconstruct(); - let mut corrupt_block = (*block).clone(); - *corrupt_block.signature_mut() = Signature::empty(); - AvailableBlock::__new_for_testing(block_root, Arc::new(corrupt_block), data, Arc::new(spec)) - }; + // The checks in this block only make sense if some data is missing as a result of the + // checkpoint sync, i.e. if we are not just checkpoint syncing from genesis. 
+ if checkpoint_slot != 0 { + // Simulate processing of a `StatusMessage` with an older finalized epoch by calling + // `block_root_at_slot` with an old slot for which we don't know the block root. It should + // return `None` rather than erroring. + assert_eq!( + beacon_chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap(), + None + ); - // Importing the invalid batch should error. - assert!(matches!( + // Simulate querying the API for a historic state that is unknown. It should also return + // `None` rather than erroring. + assert_eq!(beacon_chain.state_root_at_slot(Slot::new(1)).unwrap(), None); + + // Supply blocks backwards to reach genesis. Omit the genesis block to check genesis handling. + let historical_blocks = chain_dump[..wss_block.slot().as_usize()] + .iter() + .filter(|s| s.beacon_block.slot() != 0) + .map(|s| s.beacon_block.clone()) + .collect::>(); + + let mut available_blocks = vec![]; + for blinded in historical_blocks { + let block_root = blinded.canonical_root(); + let full_block = harness + .chain + .get_block(&block_root) + .await + .expect("should get block") + .expect("should get block"); + + if let MaybeAvailableBlock::Available(block) = harness + .chain + .data_availability_checker + .verify_kzg_for_rpc_block( + harness + .build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), + ) + .expect("should verify kzg") + { + available_blocks.push(block); + } + } + + // Corrupt the signature on the 1st block to ensure that the backfill processor is checking + // signatures correctly. Regression test for https://github.com/sigp/lighthouse/pull/5120. 
+ let mut batch_with_invalid_first_block = + available_blocks.iter().map(clone_block).collect::>(); + batch_with_invalid_first_block[0] = { + let (block_root, block, data) = clone_block(&available_blocks[0]).deconstruct(); + let mut corrupt_block = (*block).clone(); + *corrupt_block.signature_mut() = Signature::empty(); + AvailableBlock::__new_for_testing( + block_root, + Arc::new(corrupt_block), + data, + Arc::new(spec), + ) + }; + + // Importing the invalid batch should error. + assert!(matches!( + beacon_chain + .import_historical_block_batch(batch_with_invalid_first_block) + .unwrap_err(), + HistoricalBlockError::InvalidSignature + )); + + let available_blocks_slots = available_blocks + .iter() + .map(|block| (block.block().slot(), block.block().canonical_root())) + .collect::>(); + info!( + ?available_blocks_slots, + "wss_block_slot" = wss_block.slot().as_usize(), + "Importing historical block batch" + ); + + // Importing the batch with valid signatures should succeed. + let available_blocks_dup = available_blocks.iter().map(clone_block).collect::>(); + assert_eq!(beacon_chain.store.get_oldest_block_slot(), wss_block.slot()); beacon_chain - .import_historical_block_batch(batch_with_invalid_first_block) - .unwrap_err(), - HistoricalBlockError::InvalidSignature - )); + .import_historical_block_batch(available_blocks_dup) + .unwrap(); + assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); - let available_blocks_slots = available_blocks - .iter() - .map(|block| (block.block().slot(), block.block().canonical_root())) - .collect::>(); - info!( - ?available_blocks_slots, - "wss_block_slot" = wss_block.slot().as_usize(), - "Importing historical block batch" - ); - - // Importing the batch with valid signatures should succeed. 
- let available_blocks_dup = available_blocks.iter().map(clone_block).collect::>(); - assert_eq!(beacon_chain.store.get_oldest_block_slot(), wss_block.slot()); - beacon_chain - .import_historical_block_batch(available_blocks_dup) - .unwrap(); - assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); - - // Resupplying the blocks should not fail, they can be safely ignored. - beacon_chain - .import_historical_block_batch(available_blocks) - .unwrap(); + // Resupplying the blocks should not fail, they can be safely ignored. + beacon_chain + .import_historical_block_batch(available_blocks) + .unwrap(); + } // Sanity check for non-aligned WSS starts, to make sure the WSS block is persisted properly if wss_block_slot != wss_state_slot { @@ -2615,7 +2657,11 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { assert_eq!(store.get_anchor_info().anchor_slot, wss_aligned_slot); assert_eq!( store.get_anchor_info().state_upper_limit, - Slot::new(u64::MAX) + if checkpoint_slot == 0 { + Slot::new(0) + } else { + Slot::new(u64::MAX) + } ); info!(anchor = ?store.get_anchor_info(), "anchor pre"); diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index ade111983b..4bd8f12ead 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -47,6 +47,12 @@ where let lower_limit_slot = anchor.state_lower_limit; let upper_limit_slot = std::cmp::min(split.slot, anchor.state_upper_limit); + // If the split is at 0 we can't reconstruct historic states. + if split.slot == 0 { + debug!("No state reconstruction possible"); + return Ok(()); + } + // If `num_blocks` is not specified iterate all blocks. Add 1 so that we end on an epoch // boundary when `num_blocks` is a multiple of an epoch boundary. We want to be *inclusive* // of the state at slot `lower_limit_slot + num_blocks`. 
From b35854b71f04070884856403319c5bf1552f179d Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 2 Jul 2025 18:47:35 +1000 Subject: [PATCH 17/44] Record v2 beacon blocks http api metrics separately (#7692) This PR adds v2 beacon block paths to the function that records http api usage, so they don't just get recorded as "/v2/beacon" like below: image --- beacon_node/http_api/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index c757ca035b..e9b2e8e6bf 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -211,9 +211,11 @@ pub fn prometheus_metrics() -> warp::filters::log::Log Date: Thu, 3 Jul 2025 09:40:04 +1000 Subject: [PATCH 18/44] Fix lookups of the block at `oldest_block_slot` (#7693) Closes: - https://github.com/sigp/lighthouse/issues/7690 Another checkpoint sync related fix! See issue for a description of the bug. We fix it by just loading the block root of the `oldest_block_slot`, rather than trying to load the slot prior, which will always fail. --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 ++ beacon_node/beacon_chain/tests/store_tests.rs | 93 +++++++++++++------ 2 files changed, 74 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 65318835cc..9900535b2c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -991,6 +991,14 @@ impl BeaconChain { return Ok(root_opt); } + // Do not try to access the previous slot if it's older than the oldest block root + // stored in the database. Instead, load just the block root at `oldest_block_slot`, + // under the assumption that the `oldest_block_slot` *is not* a skipped slot (should be + // true because it is set by the oldest *block*). 
+ if request_slot == self.store.get_anchor_info().oldest_block_slot { + return self.block_root_at_slot_skips_prev(request_slot); + } + if let Some(((prev_root, _), (curr_root, curr_slot))) = process_results( self.forwards_iter_block_roots_until(prev_slot, request_slot)?, |iter| iter.tuple_windows().next(), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index d1a53d9b66..e9b19ee6e0 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2238,7 +2238,15 @@ async fn weak_subjectivity_sync_easy() { let num_initial_slots = E::slots_per_epoch() * 11; let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); let slots = (1..num_initial_slots).map(Slot::new).collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None).await +} + +#[tokio::test] +async fn weak_subjectivity_sync_single_block_batches() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let slots = (1..num_initial_slots).map(Slot::new).collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot, Some(1)).await } #[tokio::test] @@ -2252,7 +2260,7 @@ async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() { slot <= checkpoint_slot - 3 || slot > checkpoint_slot }) .collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None).await } #[tokio::test] @@ -2266,7 +2274,7 @@ async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() { slot <= checkpoint_slot || slot > checkpoint_slot + 3 }) .collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None).await } // Regression test for https://github.com/sigp/lighthouse/issues/4817 @@ -2278,7 +2286,7 @@ async fn weak_subjectivity_sync_skips_at_genesis() { let 
end_slot = E::slots_per_epoch() * 4; let slots = (start_slot..end_slot).map(Slot::new).collect(); let checkpoint_slot = Slot::new(E::slots_per_epoch() * 2); - weak_subjectivity_sync_test(slots, checkpoint_slot).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None).await } // Checkpoint sync from the genesis state. @@ -2291,10 +2299,14 @@ async fn weak_subjectivity_sync_from_genesis() { let end_slot = E::slots_per_epoch() * 2; let slots = (start_slot..end_slot).map(Slot::new).collect(); let checkpoint_slot = Slot::new(0); - weak_subjectivity_sync_test(slots, checkpoint_slot).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None).await } -async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { +async fn weak_subjectivity_sync_test( + slots: Vec, + checkpoint_slot: Slot, + backfill_batch_size: Option, +) { // Build an initial chain on one harness, representing a synced node with full history. let num_final_blocks = E::slots_per_epoch() * 2; @@ -2557,30 +2569,57 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .unwrap_err(), HistoricalBlockError::InvalidSignature )); - - let available_blocks_slots = available_blocks - .iter() - .map(|block| (block.block().slot(), block.block().canonical_root())) - .collect::>(); - info!( - ?available_blocks_slots, - "wss_block_slot" = wss_block.slot().as_usize(), - "Importing historical block batch" - ); - - // Importing the batch with valid signatures should succeed. - let available_blocks_dup = available_blocks.iter().map(clone_block).collect::>(); assert_eq!(beacon_chain.store.get_oldest_block_slot(), wss_block.slot()); - beacon_chain - .import_historical_block_batch(available_blocks_dup) - .unwrap(); - assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); - // Resupplying the blocks should not fail, they can be safely ignored. 
- beacon_chain - .import_historical_block_batch(available_blocks) - .unwrap(); + let batch_size = backfill_batch_size.unwrap_or(available_blocks.len()); + + for batch in available_blocks.rchunks(batch_size) { + let available_blocks_slots = batch + .iter() + .map(|block| (block.block().slot(), block.block().canonical_root())) + .collect::>(); + info!( + ?available_blocks_slots, + "wss_block_slot" = wss_block.slot().as_usize(), + "Importing historical block batch" + ); + + // Importing the batch with valid signatures should succeed. + let available_blocks_batch1 = batch.iter().map(clone_block).collect::>(); + beacon_chain + .import_historical_block_batch(available_blocks_batch1) + .unwrap(); + + // We should be able to load the block root at the `oldest_block_slot`. + // + // This is a regression test for: https://github.com/sigp/lighthouse/issues/7690 + let oldest_block_imported = &batch[0]; + let (oldest_block_slot, oldest_block_root) = + if oldest_block_imported.block().parent_root() == beacon_chain.genesis_block_root { + (Slot::new(0), beacon_chain.genesis_block_root) + } else { + available_blocks_slots[0] + }; + assert_eq!( + beacon_chain.store.get_oldest_block_slot(), + oldest_block_slot + ); + assert_eq!( + beacon_chain + .block_root_at_slot(oldest_block_slot, WhenSlotSkipped::None) + .unwrap() + .unwrap(), + oldest_block_root + ); + + // Resupplying the blocks should not fail, they can be safely ignored. 
+ let available_blocks_batch2 = batch.iter().map(clone_block).collect::>(); + beacon_chain + .import_historical_block_batch(available_blocks_batch2) + .unwrap(); + } } + assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); // Sanity check for non-aligned WSS starts, to make sure the WSS block is persisted properly if wss_block_slot != wss_state_slot { From 0f895f3066a39b6ba4f05053037d38f141271655 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 4 Jul 2025 15:54:30 -0700 Subject: [PATCH 19/44] Bump default gas limit (#7695) N/A Bump the default gas limit to 45 million based on recommendation from EL teams https://x.com/vdWijden/status/1939234101631856969 and pandas https://ethpandaops.io/posts/gaslimit-scaling/ --- .../src/test_utils/execution_block_generator.rs | 2 +- beacon_node/execution_layer/src/test_utils/mock_builder.rs | 2 +- beacon_node/http_api/tests/tests.rs | 4 ++-- book/src/help_vc.md | 2 +- lighthouse/tests/validator_client.rs | 2 +- validator_client/lighthouse_validator_store/src/lib.rs | 4 ++-- validator_client/src/cli.rs | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index e01b8de9e3..aefb6d6750 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -29,7 +29,7 @@ use super::DEFAULT_TERMINAL_BLOCK; const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); const TEST_BLOB_BUNDLE_V2: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle_v2.ssz"); -pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; +pub const DEFAULT_GAS_LIMIT: u64 = 45_000_000; const GAS_USED: u64 = DEFAULT_GAS_LIMIT - 1; #[derive(Clone, Debug, PartialEq)] diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs 
b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 3704bcc592..751e99494c 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -40,7 +40,7 @@ use warp::reply::{self, Reply}; use warp::{Filter, Rejection}; pub const DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); -pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; +pub const DEFAULT_GAS_LIMIT: u64 = 45_000_000; pub const DEFAULT_BUILDER_PRIVATE_KEY: &str = "607a11b45a7219cc61a3d9c5fd08c7eebd602a6a19a977f8d3771d5711a550f2"; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 955b44c36c..ecd20f3f79 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -4669,7 +4669,7 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .add_operation(Operation::GasLimit(30_000_000)); + .add_operation(Operation::GasLimit(DEFAULT_GAS_LIMIT as usize)); let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); @@ -4692,7 +4692,7 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 30_000_000); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); self } diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 15b5c209a7..0bc4bbf53d 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -40,7 +40,7 @@ Options: The gas limit to be used in all builder proposals for all validators managed by this validator client. Note this will not necessarily be used if the gas limit set here moves too far from the previous block's - gas limit. [default: 36000000] + gas limit. [default: 45000000] --genesis-state-url A URL of a beacon-API compatible server from which to download the genesis state. 
Checkpoint sync server URLs can generally be used with diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index f99fc3c460..7bda1868c8 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -505,7 +505,7 @@ fn no_doppelganger_protection_flag() { fn no_gas_limit_flag() { CommandLineTest::new() .run() - .with_config(|config| assert!(config.validator_store.gas_limit == Some(36_000_000))); + .with_config(|config| assert!(config.validator_store.gas_limit == Some(45_000_000))); } #[test] fn gas_limit_flag() { diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index 2cb6ba435e..67af1d73fe 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -55,8 +55,8 @@ const SLASHING_PROTECTION_HISTORY_EPOCHS: u64 = 512; /// Currently used as the default gas limit in execution clients. /// -/// https://ethresear.ch/t/on-increasing-the-block-gas-limit-technical-considerations-path-forward/21225. -pub const DEFAULT_GAS_LIMIT: u64 = 36_000_000; +/// https://ethpandaops.io/posts/gaslimit-scaling/. +pub const DEFAULT_GAS_LIMIT: u64 = 45_000_000; pub struct LighthouseValidatorStore { validators: Arc>, diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index cdbf9f8472..e1cce5c9da 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -388,7 +388,7 @@ pub struct ValidatorClient { #[clap( long, value_name = "INTEGER", - default_value_t = 36_000_000, + default_value_t = 45_000_000, requires = "builder_proposals", help = "The gas limit to be used in all builder proposals for all validators managed \ by this validator client. 
Note this will not necessarily be used if the gas limit \ From 01ec2ec7ad871e2c83ab96a2266701e069e44959 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 7 Jul 2025 14:42:34 +1000 Subject: [PATCH 20/44] Update LH book for v7.1.0 (#7706) Update the book for upcoming v7.1.0 release. This is targeted at `unstable` rather than `release-v7.1.0` because the book is built from `unstable`. --- book/src/advanced_builders.md | 4 ++-- book/src/advanced_database_migrations.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/book/src/advanced_builders.md b/book/src/advanced_builders.md index d9468898b4..de7d02d956 100644 --- a/book/src/advanced_builders.md +++ b/book/src/advanced_builders.md @@ -60,7 +60,7 @@ relays, run one of the following services and configure lighthouse to use it wit ## Validator Client Configuration In the validator client you can configure gas limit and fee recipient on a per-validator basis. If no gas limit is -configured, Lighthouse will use a default gas limit of 30,000,000, which is the current default value used in execution +configured, Lighthouse will use a default gas limit of 45,000,000, which is the current default value used in execution engines. You can also enable or disable use of external builders on a per-validator basis rather than using `--builder-proposals`, `--builder-boost-factor` or `--prefer-builder-proposals`, which apply builder related preferences for all validators. In order to manage these configurations per-validator, you can either make updates to the `validator_definitions.yml` file @@ -75,7 +75,7 @@ transaction within the block to the fee recipient, so a discrepancy in fee recip is something afoot. > Note: The gas limit configured here is effectively a vote on block size, so the configuration should not be taken lightly. 
-> 30,000,000 is currently seen as a value balancing block size with how expensive it is for +> 45,000,000 is currently seen as a value balancing block size with how expensive it is for > the network to validate blocks. So if you don't feel comfortable making an informed "vote", using the default value is > encouraged. We will update the default value if the community reaches a rough consensus on a new value. diff --git a/book/src/advanced_database_migrations.md b/book/src/advanced_database_migrations.md index f92ae7846b..e29397619c 100644 --- a/book/src/advanced_database_migrations.md +++ b/book/src/advanced_database_migrations.md @@ -17,7 +17,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| -| v7.1.0 | TBD 2025 | v23 | yes | +| v7.1.0 | Jul 2025 | v26 | yes | | v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | @@ -207,7 +207,7 @@ Here are the steps to prune historic states: | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|-------------------------------------| -| v7.1.0 | TBD 2025 | v23 | yes | +| v7.1.0 | Jul 2025 | v26 | yes | | v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | | v5.3.0 | Aug 2024 | v21 | yes before Electra using <= v7.0.0 | From 3e6b0bd0a36dfa434a779812b648d15c9e67025f Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Mon, 7 Jul 2025 12:46:18 +0200 Subject: [PATCH 21/44] Make `notifier_service::notify` pub (#7708) Anchor wants the `notify` function to run only in certain cases - so the `spawn_notifier` function is unsuitable for us. Anchor uses it's own `notify` function, which then calls `notifier_service::notify` (in most circumstances). To enable that, `notify` needs to be `pub`. 
--- validator_client/validator_services/src/notifier_service.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/validator_client/validator_services/src/notifier_service.rs b/validator_client/validator_services/src/notifier_service.rs index 6b8ea04edb..55a583774e 100644 --- a/validator_client/validator_services/src/notifier_service.rs +++ b/validator_client/validator_services/src/notifier_service.rs @@ -35,7 +35,9 @@ pub fn spawn_notifier( } /// Performs a single notification routine. -async fn notify(duties_service: &DutiesService) { +pub async fn notify( + duties_service: &DutiesService, +) { let (candidate_info, num_available, num_synced) = duties_service.beacon_nodes.get_notifier_info().await; let num_total = candidate_info.len(); From 56485cc9865a9e81d5548884de1ed711bed32788 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 8 Jul 2025 10:37:54 +1000 Subject: [PATCH 22/44] Remove unneeded spans that caused debug logs to appear when level is set to `info` (#7707) Fixes #7155. It turns out the issue is caused by calling a function that creates an info span (`chain.id()` here), e.g. ```rust debug!(id = chain.id(), ?sync_type, reason = ?remove_reason, op, "Chain removed"); ``` I've remove all unneeded spans, especially getter functions - there's little reasons for span and they often get used in logging. We should also revisit all the spans after the release - i think we could make them more useful than they are today. I've let it run for a while and no longer seeing any `DEBUG` logs. --- beacon_node/network/src/sync/range_sync/chain.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index cc49c43711..e3794bd2be 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -153,25 +153,21 @@ impl SyncingChain { } /// Check if the chain has peers from which to process batches. 
- #[instrument(parent = None,fields(chain = self.id , service = "range_sync"), skip_all)] pub fn available_peers(&self) -> usize { self.peers.len() } /// Get the chain's id. - #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn id(&self) -> ChainId { self.id } /// Peers currently syncing this chain. - #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn peers(&self) -> impl Iterator + '_ { self.peers.iter().cloned() } /// Progress in epochs made by the chain - #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn processed_epochs(&self) -> u64 { self.processing_target .saturating_sub(self.start_epoch) @@ -179,7 +175,6 @@ impl SyncingChain { } /// Returns the total count of pending blocks in all the batches of this chain - #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn pending_blocks(&self) -> usize { self.batches .values() @@ -189,7 +184,6 @@ impl SyncingChain { /// Removes a peer from the chain. /// If the peer has active batches, those are considered failed and re-requested. - #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn remove_peer(&mut self, peer_id: &PeerId) -> ProcessingResult { self.peers.remove(peer_id); @@ -201,7 +195,6 @@ impl SyncingChain { } /// Returns the latest slot number that has been processed. - #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn current_processed_slot(&self) -> Slot { // the last slot we processed was included in the previous batch, and corresponds to the // first slot of the current target epoch @@ -959,7 +952,6 @@ impl SyncingChain { } /// Returns true if this chain is currently syncing. 
- #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn is_syncing(&self) -> bool { match self.state { ChainSyncingState::Syncing => true, @@ -1115,7 +1107,6 @@ impl SyncingChain { /// This produces a string of the form: [D,E,E,E,E] /// to indicate the current buffer state of the chain. The symbols are defined on each of the /// batch states. See [BatchState::visualize] for symbol definitions. - #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn visualize_batch_state(&self) -> String { let mut visualization_string = String::with_capacity((BATCH_BUFFER_SIZE * 3) as usize); From bd8a2a8ffbaaf2247703db7852400fca4dca552c Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Tue, 8 Jul 2025 10:07:10 +0300 Subject: [PATCH 23/44] Gossip recently computed light client data (#7023) --- ...ght_client_finality_update_verification.rs | 99 +++++++++++++++++-- ...t_client_optimistic_update_verification.rs | 75 ++++++++++++-- .../src/light_client_server_cache.rs | 85 ++++++++++++++++ beacon_node/http_api/src/lib.rs | 2 +- beacon_node/http_api/src/sync_committees.rs | 32 ++++++ .../gossip_methods.rs | 11 ++- .../types/src/light_client_finality_update.rs | 17 +++- .../src/light_client_optimistic_update.rs | 3 +- testing/simulator/src/checks.rs | 4 +- 9 files changed, 307 insertions(+), 21 deletions(-) diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 879fa02f7d..0d5a5425d5 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -3,7 +3,7 @@ use derivative::Derivative; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; -use types::LightClientFinalityUpdate; +use types::{Hash256, LightClientFinalityUpdate, Slot}; /// Returned when a light client 
finality update was not successfully verified. It might not have been verified for /// two reasons: @@ -21,12 +21,37 @@ pub enum Error { /// /// Assuming the local clock is correct, the peer has sent an invalid message. TooEarly, - /// Light client finality update message does not match the locally constructed one. - InvalidLightClientFinalityUpdate, + /// Light client finalized update message does not match the locally constructed one, it has a + /// different signature slot. + MismatchedSignatureSlot { local: Slot, observed: Slot }, + /// Light client finalized update message does not match the locally constructed one, it has a + /// different finalized block header for the same signature slot. + MismatchedFinalizedHeader { + local_finalized_header_root: Hash256, + observed_finalized_header_root: Hash256, + signature_slot: Slot, + }, + /// Light client finalized update message does not match the locally constructed one, it has a + /// different attested block header for the same signature slot and finalized header. + MismatchedAttestedHeader { + local_attested_header_root: Hash256, + observed_attested_header_root: Hash256, + finalized_header_root: Hash256, + signature_slot: Slot, + }, + /// Light client finalized update message does not match the locally constructed one, it has a + /// different proof or sync aggregate for the same slot, attested header and finalized header. + MismatchedProofOrSyncAggregate { + attested_header_root: Hash256, + finalized_header_root: Hash256, + signature_slot: Slot, + }, /// Signature slot start time is none. SigSlotStartIsNone, /// Failed to construct a LightClientFinalityUpdate from state. FailedConstructingUpdate, + /// Silently ignore this light client finality update + Ignore, } /// Wraps a `LightClientFinalityUpdate` that has been verified for propagation on the gossip network. 
@@ -48,7 +73,7 @@ impl VerifiedLightClientFinalityUpdate { // verify that enough time has passed for the block to have been propagated let start_time = chain .slot_clock - .start_of(*rcv_finality_update.signature_slot()) + .start_of(rcv_finality_update.signature_slot()) .ok_or(Error::SigSlotStartIsNone)?; let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() @@ -57,16 +82,76 @@ impl VerifiedLightClientFinalityUpdate { return Err(Error::TooEarly); } + if let Some(latest_broadcasted_finality_update) = chain + .light_client_server_cache + .get_latest_broadcasted_finality_update() + { + // Ignore the incoming finality update if we've already broadcasted it + if latest_broadcasted_finality_update == rcv_finality_update { + return Err(Error::Ignore); + } + + // Ignore the incoming finality update if the latest broadcasted attested header slot + // is greater than the incoming attested header slot. + if latest_broadcasted_finality_update.get_attested_header_slot() + > rcv_finality_update.get_attested_header_slot() + { + return Err(Error::Ignore); + } + } + let latest_finality_update = chain .light_client_server_cache .get_latest_finality_update() .ok_or(Error::FailedConstructingUpdate)?; - // verify that the gossiped finality update is the same as the locally constructed one. - if latest_finality_update != rcv_finality_update { - return Err(Error::InvalidLightClientFinalityUpdate); + // Ignore the incoming finality update if the latest constructed attested header slot + // is greater than the incoming attested header slot. + if latest_finality_update.get_attested_header_slot() + > rcv_finality_update.get_attested_header_slot() + { + return Err(Error::Ignore); } + // Verify that the gossiped finality update is the same as the locally constructed one. 
+ if latest_finality_update != rcv_finality_update { + let signature_slot = latest_finality_update.signature_slot(); + if signature_slot != rcv_finality_update.signature_slot() { + return Err(Error::MismatchedSignatureSlot { + local: signature_slot, + observed: rcv_finality_update.signature_slot(), + }); + } + let local_finalized_header_root = latest_finality_update.get_finalized_header_root(); + let observed_finalized_header_root = rcv_finality_update.get_finalized_header_root(); + if local_finalized_header_root != observed_finalized_header_root { + return Err(Error::MismatchedFinalizedHeader { + local_finalized_header_root, + observed_finalized_header_root, + signature_slot, + }); + } + let local_attested_header_root = latest_finality_update.get_attested_header_root(); + let observed_attested_header_root = rcv_finality_update.get_attested_header_root(); + if local_attested_header_root != observed_attested_header_root { + return Err(Error::MismatchedAttestedHeader { + local_attested_header_root, + observed_attested_header_root, + finalized_header_root: local_finalized_header_root, + signature_slot, + }); + } + return Err(Error::MismatchedProofOrSyncAggregate { + attested_header_root: local_attested_header_root, + finalized_header_root: local_finalized_header_root, + signature_slot, + }); + } + + chain + .light_client_server_cache + .set_latest_broadcasted_finality_update(rcv_finality_update.clone()); + Ok(Self { light_client_finality_update: rcv_finality_update, seen_timestamp, diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 5665adc3ed..4da6913443 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -4,7 +4,7 @@ use eth2::types::Hash256; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; -use 
types::LightClientOptimisticUpdate; +use types::{LightClientOptimisticUpdate, Slot}; /// Returned when a light client optimistic update was not successfully verified. It might not have been verified for /// two reasons: @@ -22,14 +22,30 @@ pub enum Error { /// /// Assuming the local clock is correct, the peer has sent an invalid message. TooEarly, - /// Light client optimistic update message does not match the locally constructed one. - InvalidLightClientOptimisticUpdate, + /// Light client optimistic update message does not match the locally constructed one, it has a + /// different signature slot. + MismatchedSignatureSlot { local: Slot, observed: Slot }, + /// Light client optimistic update message does not match the locally constructed one, it has a + /// different block header at the same slot. + MismatchedAttestedHeader { + local_attested_header_root: Hash256, + observed_attested_header_root: Hash256, + signature_slot: Slot, + }, + /// Light client optimistic update message does not match the locally constructed one, it has a + /// different sync aggregate for the same slot and attested header. + MismatchedSyncAggregate { + attested_header_root: Hash256, + signature_slot: Slot, + }, /// Signature slot start time is none. SigSlotStartIsNone, /// Failed to construct a LightClientOptimisticUpdate from state. FailedConstructingUpdate, /// Unknown block with parent root. UnknownBlockParentRoot(Hash256), + /// Silently ignore this light client optimistic update + Ignore, } /// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network. 
@@ -52,7 +68,7 @@ impl VerifiedLightClientOptimisticUpdate { // verify that enough time has passed for the block to have been propagated let start_time = chain .slot_clock - .start_of(*rcv_optimistic_update.signature_slot()) + .start_of(rcv_optimistic_update.signature_slot()) .ok_or(Error::SigSlotStartIsNone)?; let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() @@ -61,6 +77,22 @@ impl VerifiedLightClientOptimisticUpdate { return Err(Error::TooEarly); } + if let Some(latest_broadcasted_optimistic_update) = chain + .light_client_server_cache + .get_latest_broadcasted_optimistic_update() + { + // Ignore the incoming optimistic update if we've already broadcasted it + if latest_broadcasted_optimistic_update == rcv_optimistic_update { + return Err(Error::Ignore); + } + + // Ignore the incoming optimistic update if the latest broadcasted slot + // is greater than the incoming slot. + if latest_broadcasted_optimistic_update.get_slot() > rcv_optimistic_update.get_slot() { + return Err(Error::Ignore); + } + } + let head = chain.canonical_head.cached_head(); let head_block = &head.snapshot.beacon_block; // check if we can process the optimistic update immediately @@ -76,11 +108,40 @@ impl VerifiedLightClientOptimisticUpdate { .get_latest_optimistic_update() .ok_or(Error::FailedConstructingUpdate)?; - // verify that the gossiped optimistic update is the same as the locally constructed one. - if latest_optimistic_update != rcv_optimistic_update { - return Err(Error::InvalidLightClientOptimisticUpdate); + // Ignore the incoming optimistic update if the latest constructed slot + // is greater than the incoming slot. + if latest_optimistic_update.get_slot() > rcv_optimistic_update.get_slot() { + return Err(Error::Ignore); } + // Verify that the gossiped optimistic update is the same as the locally constructed one. 
+ if latest_optimistic_update != rcv_optimistic_update { + let signature_slot = latest_optimistic_update.signature_slot(); + if signature_slot != rcv_optimistic_update.signature_slot() { + return Err(Error::MismatchedSignatureSlot { + local: signature_slot, + observed: rcv_optimistic_update.signature_slot(), + }); + } + let local_attested_header_root = latest_optimistic_update.get_canonical_root(); + let observed_attested_header_root = rcv_optimistic_update.get_canonical_root(); + if local_attested_header_root != observed_attested_header_root { + return Err(Error::MismatchedAttestedHeader { + local_attested_header_root, + observed_attested_header_root, + signature_slot, + }); + } + return Err(Error::MismatchedSyncAggregate { + attested_header_root: local_attested_header_root, + signature_slot, + }); + } + + chain + .light_client_server_cache + .set_latest_broadcasted_optimistic_update(rcv_optimistic_update.clone()); + let parent_root = rcv_optimistic_update.get_parent_root(); Ok(Self { light_client_optimistic_update: rcv_optimistic_update, diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index 3099c451c0..22122ee554 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -40,6 +40,10 @@ pub struct LightClientServerCache { latest_written_current_sync_committee: RwLock>>>, /// Caches state proofs by block root prev_block_cache: Mutex>>, + /// Tracks the latest broadcasted finality update + latest_broadcasted_finality_update: RwLock>>, + /// Tracks the latest broadcasted optimistic update + latest_broadcasted_optimistic_update: RwLock>>, } impl LightClientServerCache { @@ -49,6 +53,8 @@ impl LightClientServerCache { latest_optimistic_update: None.into(), latest_light_client_update: None.into(), latest_written_current_sync_committee: None.into(), + latest_broadcasted_finality_update: None.into(), + 
latest_broadcasted_optimistic_update: None.into(), prev_block_cache: lru::LruCache::new(PREV_BLOCK_CACHE_SIZE).into(), } } @@ -334,10 +340,89 @@ impl LightClientServerCache { Ok(new_value) } + /// Checks if we've already broadcasted the latest finality update. + /// If we haven't, update the `latest_broadcasted_finality_update` cache + /// and return the latest finality update for broadcasting, else return `None`. + pub fn should_broadcast_latest_finality_update( + &self, + ) -> Option> { + if let Some(latest_finality_update) = self.get_latest_finality_update() { + let latest_broadcasted_finality_update = self.get_latest_broadcasted_finality_update(); + match latest_broadcasted_finality_update { + Some(latest_broadcasted_finality_update) => { + if latest_broadcasted_finality_update != latest_finality_update { + self.set_latest_broadcasted_finality_update(latest_finality_update.clone()); + return Some(latest_finality_update); + } + } + None => { + self.set_latest_broadcasted_finality_update(latest_finality_update.clone()); + return Some(latest_finality_update); + } + } + } + + None + } + pub fn get_latest_finality_update(&self) -> Option> { self.latest_finality_update.read().clone() } + pub fn get_latest_broadcasted_optimistic_update( + &self, + ) -> Option> { + self.latest_broadcasted_optimistic_update.read().clone() + } + + pub fn get_latest_broadcasted_finality_update( + &self, + ) -> Option> { + self.latest_broadcasted_finality_update.read().clone() + } + + pub fn set_latest_broadcasted_optimistic_update( + &self, + optimistic_update: LightClientOptimisticUpdate, + ) { + *self.latest_broadcasted_optimistic_update.write() = Some(optimistic_update.clone()); + } + + pub fn set_latest_broadcasted_finality_update( + &self, + finality_update: LightClientFinalityUpdate, + ) { + *self.latest_broadcasted_finality_update.write() = Some(finality_update.clone()); + } + + /// Checks if we've already broadcasted the latest optimistic update. 
+ /// If we haven't, update the `latest_broadcasted_optimistic_update` cache + /// and return the latest optimistic update for broadcasting, else return `None`. + pub fn should_broadcast_latest_optimistic_update( + &self, + ) -> Option> { + if let Some(latest_optimistic_update) = self.get_latest_optimistic_update() { + let latest_broadcasted_optimistic_update = + self.get_latest_broadcasted_optimistic_update(); + match latest_broadcasted_optimistic_update { + Some(latest_broadcasted_optimistic_update) => { + if latest_broadcasted_optimistic_update != latest_optimistic_update { + self.set_latest_broadcasted_optimistic_update( + latest_optimistic_update.clone(), + ); + return Some(latest_optimistic_update); + } + } + None => { + self.set_latest_broadcasted_optimistic_update(latest_optimistic_update.clone()); + return Some(latest_optimistic_update); + } + } + } + + None + } + pub fn get_latest_optimistic_update(&self) -> Option> { self.latest_optimistic_update.read().clone() } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index e9b2e8e6bf..2db93c0033 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2600,7 +2600,7 @@ pub fn serve( let fork_name = chain .spec - .fork_name_at_slot::(*update.signature_slot()); + .fork_name_at_slot::(update.signature_slot()); match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index aa126bbc82..57c74f8d01 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -320,6 +320,38 @@ pub fn process_signed_contribution_and_proofs( let seen_timestamp = timestamp_now(); + if let Some(latest_optimistic_update) = chain + .light_client_server_cache + .should_broadcast_latest_optimistic_update() + { + let _ = publish_pubsub_message( + &network_tx, + 
PubsubMessage::LightClientOptimisticUpdate(Box::new(latest_optimistic_update)), + ) + .inspect_err(|e| { + error!( + error = ?e, + "Unable to broadcast latest light client optimistic update" + ); + }); + }; + + if let Some(latest_finality_update) = chain + .light_client_server_cache + .should_broadcast_latest_finality_update() + { + let _ = publish_pubsub_message( + &network_tx, + PubsubMessage::LightClientFinalityUpdate(Box::new(latest_finality_update)), + ) + .inspect_err(|e| { + error!( + error = ?e, + "Unable to broadcast latest light client finality update" + ); + }); + }; + // Verify contributions & broadcast to the network. for (index, contribution) in signed_contribution_and_proofs.into_iter().enumerate() { let aggregator_index = contribution.message.aggregator_index; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 6bdcd02197..0b17965f3c 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1967,7 +1967,10 @@ impl NetworkBeaconProcessor { Err(e) => { metrics::register_finality_update_error(&e); match e { - LightClientFinalityUpdateError::InvalidLightClientFinalityUpdate => { + LightClientFinalityUpdateError::MismatchedSignatureSlot { .. } + | LightClientFinalityUpdateError::MismatchedAttestedHeader { .. } + | LightClientFinalityUpdateError::MismatchedFinalizedHeader { .. } + | LightClientFinalityUpdateError::MismatchedProofOrSyncAggregate { .. 
} => { debug!( %peer_id, error = ?e, @@ -1999,6 +2002,7 @@ impl NetworkBeaconProcessor { error = ?e, "Light client error constructing finality update" ), + LightClientFinalityUpdateError::Ignore => {} } self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } @@ -2080,7 +2084,9 @@ impl NetworkBeaconProcessor { } return; } - LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => { + LightClientOptimisticUpdateError::MismatchedSignatureSlot { .. } + | LightClientOptimisticUpdateError::MismatchedAttestedHeader { .. } + | LightClientOptimisticUpdateError::MismatchedSyncAggregate { .. } => { metrics::register_optimistic_update_error(&e); debug!( @@ -2119,6 +2125,7 @@ impl NetworkBeaconProcessor { "Light client error constructing optimistic update" ) } + LightClientOptimisticUpdateError::Ignore => {} } self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 9189dcd0a0..2125b4668b 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -79,6 +79,7 @@ pub struct LightClientFinalityUpdate { /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature + #[superstruct(getter(copy))] pub signature_slot: Slot, } @@ -179,6 +180,20 @@ impl LightClientFinalityUpdate { }) } + pub fn get_attested_header_root<'a>(&'a self) -> Hash256 { + map_light_client_finality_update_ref!(&'a _, self.to_ref(), |inner, cons| { + cons(inner); + inner.attested_header.beacon.canonical_root() + }) + } + + pub fn get_finalized_header_root<'a>(&'a self) -> Hash256 { + map_light_client_finality_update_ref!(&'a _, self.to_ref(), |inner, cons| { + cons(inner); + inner.finalized_header.beacon.canonical_root() + }) + } + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { let 
finality_update = match fork_name { ForkName::Altair | ForkName::Bellatrix => { @@ -227,7 +242,7 @@ impl LightClientFinalityUpdate { if attested_slot > prev_slot { true } else { - attested_slot == prev_slot && signature_slot > *self.signature_slot() + attested_slot == prev_slot && signature_slot > self.signature_slot() } } } diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 5701ebd875..13e308cd27 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -60,6 +60,7 @@ pub struct LightClientOptimisticUpdate { /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature + #[superstruct(getter(copy))] pub signature_slot: Slot, } @@ -200,7 +201,7 @@ impl LightClientOptimisticUpdate { if attested_slot > prev_slot { true } else { - attested_slot == prev_slot && signature_slot > *self.signature_slot() + attested_slot == prev_slot && signature_slot > self.signature_slot() } } } diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 1b2d4024d1..e7cc9b7a4e 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -303,7 +303,7 @@ pub(crate) async fn verify_light_client_updates( } // Verify light client optimistic update. `signature_slot_distance` should be 1 in the ideal scenario. - let signature_slot = *client + let signature_slot = client .get_beacon_light_client_optimistic_update::() .await .map_err(|e| format!("Error while getting light client updates: {:?}", e))? @@ -332,7 +332,7 @@ pub(crate) async fn verify_light_client_updates( } continue; } - let signature_slot = *client + let signature_slot = client .get_beacon_light_client_finality_update::() .await .map_err(|e| format!("Error while getting light client updates: {:?}", e))? 
From b9c1a2b0c0997fd0530a995f7ea72f89b35ba22c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 8 Jul 2025 18:50:15 +1000 Subject: [PATCH 24/44] Fix description of DB read bytes metric (#7716) Fix a trivial typo that mixed up reads and writes. --- beacon_node/store/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 44b61e1ebe..e04e662865 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -68,7 +68,7 @@ pub static DISK_DB_WRITE_COUNT: LazyLock> = LazyLock::new( pub static DISK_DB_READ_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram( "store_disk_db_read_seconds", - "Time taken to write bytes to store.", + "Time taken to read bytes from store.", ) }); pub static DISK_DB_WRITE_TIMES: LazyLock> = LazyLock::new(|| { From 734ad90dd8792c01bb32df56196e90820057587a Mon Sep 17 00:00:00 2001 From: cakevm Date: Wed, 9 Jul 2025 07:02:41 +0200 Subject: [PATCH 25/44] Upgrade to c-kzg 2.1.0 and alloy-primitives 1.0 (#7271) Update `c-kzg` from `v1` to `v2`. My motivation here is that `alloy-consensus` now uses `c-kzg` in `v2` and this results in a conflict when using lighthouse in combination with latest alloy. I tried also to disable the `czkg` feature in alloy, but the conflict persisted. See here for the alloy update to `c-kzg v2`: https://github.com/alloy-rs/alloy/pull/2240 Error: ``` error: failed to select a version for `c-kzg`. ... versions that meet the requirements `^1` are: 1.0.3, 1.0.2, 1.0.0 the package `c-kzg` links to the native library `ckzg`, but it conflicts with a previous package which links to `ckzg` as well: package `c-kzg v2.1.0` ... which satisfies dependency `c-kzg = "^2.1"` of package `alloy-consensus v0.13.0` ... which satisfies dependency `alloy-consensus = "^0.13.0"` of package ... ... 
``` - Upgrade `alloy-consensus` to `0.14.0` and disable all default features - Upgrade `c-kzg` to `v2.1.0` - Upgrade `alloy-primitives` to `1.0.0` - Adapt the code to the new API `c-kzg` - There is now `NO_PRECOMPUTE` as my understand from https://github.com/ethereum/c-kzg-4844/pull/545/files we should use `0` here as `new_from_trusted_setup_no_precomp` does not precomp. But maybe it is misleading. For all other places I used `RECOMMENDED_PRECOMP_WIDTH` because `8` is matching the recommendation. - `BYTES_PER_G1_POINT` and `BYTES_PER_G2_POINT` are no longer public in `c-kzg` - I adapted two tests that checking for the `Attestation` bitfield size. But I could not pinpoint to what has changed and why now 8 bytes less. I would be happy about any hint, and if this is correct. I found related a PR here: https://github.com/sigp/lighthouse/pull/6915 - Use same fields names, in json, as well as `c-kzg` and `rust_eth_kzg` for `g1_monomial`, `g1_lagrange`, and `g2_monomial` --- Cargo.lock | 1037 ++++++++++++++----------- Cargo.toml | 20 +- consensus/types/src/attestation.rs | 10 +- crypto/kzg/benches/benchmark.rs | 11 +- crypto/kzg/src/lib.rs | 51 +- crypto/kzg/src/trusted_setup.rs | 37 +- testing/simulator/src/basic_sim.rs | 2 +- testing/simulator/src/fallback_sim.rs | 2 +- 8 files changed, 650 insertions(+), 520 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5e22c9742a..37cb553bed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -113,14 +113,14 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.12" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -140,21 +140,38 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = 
"alloy-consensus" -version = "0.3.6" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" +checksum = "c2179ba839ac532f50279f5da2a6c5047f791f03f6f808b4dfab11327b97902f" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "c-kzg", + "alloy-trie", + "auto_impl 1.2.1", + "derive_more 2.0.1", + "either", + "once_cell", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-eip2124" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "741bdd7499908b3aa0b159bba11e71c8cddd009a2c2eb7a06e825f1ec87900a5" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "crc", + "thiserror 2.0.12", ] [[package]] name = "alloy-eip2930" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +checksum = "dbe3e16484669964c26ac48390245d84c410b1a5f968976076c17184725ef235" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -162,36 +179,37 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +checksum = "804cefe429015b4244966c006d25bda5545fa9db5990e9c9079faf255052f50a" dependencies = [ "alloy-primitives", "alloy-rlp", + "thiserror 2.0.12", ] [[package]] name = "alloy-eips" -version = "0.3.6" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" +checksum = "609515c1955b33af3d78d26357540f68c5551a90ef58fd53def04f2aa074ec43" dependencies = [ + "alloy-eip2124", "alloy-eip2930", "alloy-eip7702", "alloy-primitives", "alloy-rlp", - "c-kzg", - "derive_more 1.0.0", - "once_cell", - "serde", - "sha2 0.10.9", + "auto_impl 
1.2.1", + "derive_more 2.0.1", + "either", + "sha2 0.10.8", ] [[package]] name = "alloy-primitives" -version = "0.8.25" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" +checksum = "70b98b99c1dcfbe74d7f0b31433ff215e7d1555e367d90e62db904f3c9d4ff53" dependencies = [ "alloy-rlp", "arbitrary", @@ -201,16 +219,16 @@ dependencies = [ "derive_arbitrary", "derive_more 2.0.1", "foldhash", - "getrandom 0.2.16", - "hashbrown 0.15.3", - "indexmap 2.9.0", + "getrandom 0.3.1", + "hashbrown 0.15.2", + "indexmap 2.8.0", "itoa", "k256 0.13.4", "keccak-asm", "paste", "proptest", "proptest-derive", - "rand 0.8.5", + "rand 0.9.0", "ruint", "rustc-hash 2.1.1", "serde", @@ -237,7 +255,22 @@ checksum = "a40e1ef334153322fd878d07e86af7a529bcb86b2439525920a88eba87bcf943" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", +] + +[[package]] +name = "alloy-trie" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "983d99aa81f586cef9dae38443245e585840fcf0fc58b09aee0b1f27aed1d500" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arrayvec", + "derive_more 2.0.1", + "nybbles", + "smallvec", + "tracing", ] [[package]] @@ -313,9 +346,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "arbitrary" @@ -507,7 +540,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", "synstructure", ] @@ -519,7 +552,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", 
+ "syn 2.0.100", ] [[package]] @@ -599,7 +632,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -621,18 +654,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -701,13 +734,13 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.3.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" +checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -726,7 +759,7 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.3.1", + "http 1.3.0", "http-body 1.0.1", "http-body-util", "itoa", @@ -752,7 +785,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.3.1", + "http 1.3.0", "http-body 1.0.1", "http-body-util", "mime", @@ -765,9 +798,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.75" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", "cfg-if", @@ -838,9 +871,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = 
"base64ct" -version = "1.7.3" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "bb97d56060ee67d285efb8001fec9d2a4c710c32efd2e14b5cbb5ba71930fc2d" [[package]] name = "beacon_chain" @@ -1010,7 +1043,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.101", + "syn 2.0.100", "which", ] @@ -1281,17 +1314,16 @@ dependencies = [ [[package]] name = "c-kzg" -version = "1.0.3" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" +checksum = "4e7e3c397401eb76228c89561cf22f85f41c95aa799ee9d860de3ea1cbc728fc" dependencies = [ + "arbitrary", "blst", "cc", "glob", "hex", "libc", - "once_cell", - "serde", ] [[package]] @@ -1334,9 +1366,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.21" +version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8691782945451c1c383942c4874dbe63814f61cb57ef773cda2972682b7bb3c0" +checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ "jobserver", "libc", @@ -1390,9 +1422,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1462,9 +1494,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.37" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eccb054f56cbd38340b380d4a8e69ef1f02f1af43db2f0cc817a4774d80ae071" +checksum = 
"6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83" dependencies = [ "clap_builder", "clap_derive", @@ -1472,9 +1504,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.37" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd9466fac8543255d3b1fcad4762c5e116ffe808c8a3043d4263cd4fd4862a2" +checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8" dependencies = [ "anstream", "anstyle", @@ -1492,7 +1524,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -1579,12 +1611,12 @@ dependencies = [ "bs58 0.4.0", "coins-core", "digest 0.10.7", - "getrandom 0.2.16", + "getrandom 0.2.15", "hmac 0.12.1", "k256 0.11.6", "lazy_static", "serde", - "sha2 0.10.9", + "sha2 0.10.8", "thiserror 1.0.69", ] @@ -1596,12 +1628,12 @@ checksum = "2a11892bcac83b4c6e95ab84b5b06c76d9d70ad73548dd07418269c5c7977171" dependencies = [ "bitvec 0.17.4", "coins-bip32", - "getrandom 0.2.16", + "getrandom 0.2.15", "hex", "hmac 0.12.1", "pbkdf2 0.11.0", "rand 0.8.5", - "sha2 0.10.9", + "sha2 0.10.8", "thiserror 1.0.69", ] @@ -1621,7 +1653,7 @@ dependencies = [ "ripemd", "serde", "serde_derive", - "sha2 0.10.9", + "sha2 0.10.8", "sha3 0.10.8", "thiserror 1.0.69", ] @@ -1868,9 +1900,24 @@ dependencies = [ "crate_crypto_internal_eth_kzg_maybe_rayon", "crate_crypto_internal_eth_kzg_polynomial", "hex", - "sha2 0.10.9", + "sha2 0.10.8", ] +[[package]] +name = "crc" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + [[package]] name = "crc32fast" version = "1.4.2" @@ -1991,6 +2038,16 @@ dependencies = [ 
"typenum", ] +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array 0.14.7", + "subtle", +] + [[package]] name = "crypto-mac" version = "0.11.0" @@ -2021,9 +2078,9 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.6" +version = "3.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697b5419f348fd5ae2478e8018cb016c00a5881c7f46c717de98ffd135a5651c" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" dependencies = [ "nix 0.29.0", "windows-sys 0.59.0", @@ -2053,7 +2110,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2068,12 +2125,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.11" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core 0.20.11", - "darling_macro 0.20.11", + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] @@ -2092,16 +2149,16 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.11" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2117,13 +2174,13 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.11" +version = "0.20.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core 0.20.11", + "darling_core 0.20.10", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2148,15 +2205,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.9.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" +checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" [[package]] name = "data-encoding-macro" -version = "0.1.18" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" +checksum = "9f9724adfcf41f45bf652b3995837669d73c4d49a1b5ac1ff82905ac7d9b5558" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2164,12 +2221,12 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.16" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" +checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f" dependencies = [ "data-encoding", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2197,9 +2254,9 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" [[package]] name = "delay_map" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88e365f083a5cb5972d50ce8b1b2c9f125dc5ec0f50c0248cfb568ae59efcf0b" +checksum = "df941644b671f05f59433e481ba0d31ac10e3667de725236a4c0d587c496fba1" dependencies = [ "futures", "tokio", @@ -2232,9 +2289,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.10" 
+version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "pem-rfc7468", @@ -2257,9 +2314,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.0" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -2283,20 +2340,20 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] name = "derive_more" -version = "0.99.20" +version = "0.99.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" +checksum = "3da29a38df43d6f156149c9b43ded5e018ddff2a855cf2cfd62e8cd7d079c69f" dependencies = [ "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2325,7 +2382,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2336,7 +2393,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", "unicode-xid", ] @@ -2440,7 +2497,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2491,7 +2548,7 @@ version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.10", + "der 0.7.9", "digest 0.10.7", "elliptic-curve 0.13.8", "rfc6979 0.4.0", @@ -2519,7 +2576,7 @@ dependencies = [ "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.9", + "sha2 0.10.8", "subtle", "zeroize", ] @@ -2533,7 +2590,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2653,7 +2710,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2673,7 +2730,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -2729,9 +2786,9 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.11" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", "windows-sys 0.59.0", @@ -2753,7 +2810,7 @@ dependencies = [ "scrypt 0.10.0", "serde", "serde_json", - "sha2 0.10.9", + "sha2 0.10.8", "sha3 0.10.8", "thiserror 1.0.69", "uuid 0.8.2", @@ -2991,14 +3048,14 @@ checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", "ring", - "sha2 0.10.9", + "sha2 0.10.8", ] [[package]] name = "ethereum_serde_utils" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70cbccfccf81d67bff0ab36e591fa536c8a935b078a7b0e58c1d00d418332fc9" +checksum = "3dc1355dbb41fbbd34ec28d4fb2a57d9a70c67ac3c19f6a5ca4d4a176b9e997a" dependencies = [ "alloy-primitives", "hex", @@ -3009,9 +3066,9 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.8.3" +version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "86da3096d1304f5f28476ce383005385459afeaf0eea08592b65ddbc9b258d16" +checksum = "9ca8ba45b63c389c6e115b095ca16381534fdcc03cf58176a3f8554db2dbe19b" dependencies = [ "alloy-primitives", "arbitrary", @@ -3025,14 +3082,14 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.8.3" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d832a5c38eba0e7ad92592f7a22d693954637fbb332b4f669590d66a5c3183e5" +checksum = "0dd55d08012b4e0dfcc92b8d6081234df65f2986ad34cc76eeed69c5e2ce7506" dependencies = [ - "darling 0.20.11", + "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -3088,7 +3145,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9713f525348e5dde025d09b0a4217429f8074e8ff22c886263cc191e87d8216" dependencies = [ "ethers-core", - "getrandom 0.2.16", + "getrandom 0.2.15", "reqwest", "semver 1.0.26", "serde", @@ -3131,13 +3188,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" dependencies = [ "async-trait", - "auto_impl 1.3.0", + "auto_impl 1.2.1", "base64 0.13.1", "ethers-core", "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.16", + "getrandom 0.2.15", "hashers", "hex", "http 0.2.12", @@ -3173,7 +3230,7 @@ dependencies = [ "ethers-core", "hex", "rand 0.8.5", - "sha2 0.10.9", + "sha2 0.10.8", "thiserror 1.0.69", ] @@ -3196,9 +3253,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.4" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ "event-listener 5.4.0", "pin-project-lite", @@ -3323,7 +3380,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" dependencies = [ "arrayvec", - "auto_impl 1.3.0", + "auto_impl 1.2.1", "bytes", ] @@ -3334,7 +3391,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" dependencies = [ "arrayvec", - "auto_impl 1.3.0", + "auto_impl 1.2.1", "bytes", ] @@ -3433,9 +3490,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" dependencies = [ "crc32fast", "libz-sys", @@ -3450,9 +3507,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.5" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "foreign-types" @@ -3610,7 +3667,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -3620,7 +3677,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.27", + "rustls 0.23.23", "rustls-pki-types", ] @@ -3719,9 +3776,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = 
"c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -3732,16 +3789,14 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" dependencies = [ "cfg-if", - "js-sys", "libc", - "r-efi", - "wasi 0.14.2+wasi-0.2.4", - "wasm-bindgen", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", ] [[package]] @@ -3777,7 +3832,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -3834,7 +3889,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.9.0", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", @@ -3843,17 +3898,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.10" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.3.1", - "indexmap 2.9.0", + "http 1.3.0", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", @@ -3862,9 +3917,9 @@ dependencies = [ [[package]] name = "half" -version = "2.6.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ "cfg-if", "crunchy", @@ -3903,9 +3958,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ "allocator-api2", "equivalent", @@ -4022,9 +4077,9 @@ checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hermit-abi" -version = "0.5.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08" +checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" [[package]] name = "hex" @@ -4058,7 +4113,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.1", + "rand 0.9.0", "socket2", "thiserror 2.0.12", "tinyvec", @@ -4080,7 +4135,7 @@ dependencies = [ "moka", "once_cell", "parking_lot 0.12.3", - "rand 0.9.1", + "rand 0.9.0", "resolv-conf", "smallvec", "thiserror 2.0.12", @@ -4097,13 +4152,23 @@ dependencies = [ "hmac 0.12.1", ] +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + [[package]] name = "hmac" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ - "crypto-mac", + "crypto-mac 0.11.0", "digest 0.9.0", ] @@ -4116,6 +4181,17 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.7", + "hmac 0.8.1", +] + [[package]] name = "home" version = "0.5.11" @@ -4125,6 +4201,17 @@ dependencies = [ "windows-sys 
0.59.0", ] +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi", +] + [[package]] name = "http" version = "0.2.12" @@ -4138,9 +4225,9 @@ dependencies = [ [[package]] name = "http" -version = "1.3.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "0a761d192fbf18bdef69f5ceedd0d1333afcbda0ee23840373b8317570d23c65" dependencies = [ "bytes", "fnv", @@ -4165,7 +4252,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.3.1", + "http 1.3.0", ] [[package]] @@ -4176,7 +4263,7 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http 1.3.1", + "http 1.3.0", "http-body 1.0.1", "pin-project-lite", ] @@ -4264,9 +4351,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.2.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" @@ -4301,8 +4388,8 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.10", - "http 1.3.1", + "h2 0.4.8", + "http 1.3.0", "http-body 1.0.1", "httparse", "httpdate", @@ -4355,17 +4442,16 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.11" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +checksum = 
"df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.3.1", + "http 1.3.0", "http-body 1.0.1", "hyper 1.6.0", - "libc", "pin-project-lite", "socket2", "tokio", @@ -4375,17 +4461,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", - "log", "wasm-bindgen", - "windows-core 0.61.0", + "windows-core 0.52.0", ] [[package]] @@ -4438,9 +4523,9 @@ dependencies = [ [[package]] name = "icu_locid_transform_data" -version = "1.5.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" [[package]] name = "icu_normalizer" @@ -4462,9 +4547,9 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.5.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" [[package]] name = "icu_properties" @@ -4483,9 +4568,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.5.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" [[package]] name = "icu_provider" @@ -4512,7 +4597,7 @@ checksum = 
"1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -4585,7 +4670,7 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 1.3.1", + "http 1.3.0", "http-body-util", "hyper 1.6.0", "hyper-util", @@ -4598,20 +4683,20 @@ dependencies = [ [[package]] name = "igd-next" -version = "0.16.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06464e726471718db9ad3fefc020529fabcde03313a0fc3967510e2db5add12" +checksum = "2830127baaaa55dae9aa5ee03158d5aa3687a9c2c11ce66870452580cc695df4" dependencies = [ "async-trait", "attohttpc", "bytes", "futures", - "http 1.3.1", + "http 1.3.0", "http-body-util", "hyper 1.6.0", "hyper-util", "log", - "rand 0.9.1", + "rand 0.8.5", "tokio", "url", "xmltree", @@ -4670,7 +4755,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -4685,13 +4770,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.15.2", "serde", ] @@ -4778,7 +4863,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ "socket2", - "widestring 1.2.0", + "widestring 1.1.0", "windows-sys 0.48.0", "winreg", ] @@ -4795,7 +4880,7 @@ version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.5.1", + "hermit-abi 0.5.0", "libc", 
"windows-sys 0.59.0", ] @@ -4841,11 +4926,10 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ - "getrandom 0.3.2", "libc", ] @@ -4883,7 +4967,7 @@ dependencies = [ "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.9", + "sha2 0.10.8", "sha3 0.10.8", ] @@ -4897,7 +4981,7 @@ dependencies = [ "ecdsa 0.16.9", "elliptic-curve 0.13.8", "once_cell", - "sha2 0.10.9", + "sha2 0.10.8", "signature 2.2.0", ] @@ -5026,9 +5110,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.172" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libloading" @@ -5042,9 +5126,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libmdbx" @@ -5053,7 +5137,7 @@ source = "git+https://github.com/sigp/libmdbx-rs?rev=e6ff4b9377c1619bcf0bfdf52be dependencies = [ "bitflags 1.3.2", "byteorder", - "derive_more 0.99.20", + "derive_more 0.99.19", "indexmap 1.9.3", "libc", "mdbx-sys", @@ -5071,7 +5155,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.16", + "getrandom 0.2.15", "libp2p-allow-block-list", "libp2p-connection-limits", "libp2p-core", @@ -5171,7 +5255,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "getrandom 
0.2.16", + "getrandom 0.2.15", "hashlink 0.9.1", "hex_fmt", "libp2p-core", @@ -5182,7 +5266,7 @@ dependencies = [ "quick-protobuf-codec", "rand 0.8.5", "regex", - "sha2 0.10.9", + "sha2 0.10.8", "tracing", "web-time", ] @@ -5210,22 +5294,22 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.11" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb68ea10844211a59ce46230909fd0ea040e8a192454d4cc2ee0d53e12280eb" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" dependencies = [ "asn1_der", "bs58 0.5.1", "ed25519-dalek", "hkdf", - "k256 0.13.4", + "libsecp256k1", "multihash", "p256", "quick-protobuf", "rand 0.8.5", "sec1 0.7.3", - "sha2 0.10.9", - "thiserror 2.0.12", + "sha2 0.10.8", + "thiserror 1.0.69", "tracing", "zeroize", ] @@ -5339,7 +5423,7 @@ dependencies = [ "quinn", "rand 0.8.5", "ring", - "rustls 0.23.27", + "rustls 0.23.23", "socket2", "thiserror 2.0.12", "tokio", @@ -5378,7 +5462,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -5409,7 +5493,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring", - "rustls 0.23.27", + "rustls 0.23.23", "rustls-webpki 0.101.7", "thiserror 2.0.12", "x509-parser", @@ -5456,6 +5540,54 @@ dependencies = [ "libc", ] +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64 0.13.1", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + 
"digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "libsqlite3-sys" version = "0.25.2" @@ -5469,9 +5601,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.22" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "pkg-config", @@ -5628,9 +5760,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "6db9c683daf087dc577b7506e9695b3d556a9f3849903fa28186283afd6809e9" [[package]] name = "litemap" @@ -5661,13 +5793,13 @@ dependencies = [ [[package]] name = "local-ip-address" -version = "0.6.5" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" +checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" dependencies = [ "libc", "neli", - "thiserror 2.0.12", + "thiserror 1.0.69", "windows-sys 0.59.0", ] @@ -5691,9 +5823,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.27" +version = "0.4.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" [[package]] name = "logging" @@ -5715,9 +5847,9 @@ dependencies = [ [[package]] name = "logroller" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90536db32a1cb3672665cdf3269bf030b0f395fabee863895c27b75b9f7a8a7d" +checksum = "83db12bbf439ebe64c0b0e4402f435b6f866db498fc1ae17e1b5d1a01625e2be" dependencies = [ "chrono", "flate2", @@ -5744,7 +5876,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.2", ] [[package]] @@ -5781,6 +5913,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.1.0" @@ -5878,9 +6016,9 @@ dependencies = [ [[package]] name = "milhouse" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1ada1f56cc1c79f40517fdcbf57e19f60424a3a1ce372c3fe9b22e4fdd83eb" +checksum = "bdc758ed0c2597254f45baa97c8aa35f44ae0c8b04ddc355f135ced531f316d6" dependencies = [ "alloy-primitives", "arbitrary", @@ -5923,9 +6061,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" 
+checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ "adler2", ] @@ -5970,7 +6108,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -5982,7 +6120,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -5995,13 +6133,13 @@ dependencies = [ "bytes", "colored", "futures-util", - "http 1.3.1", + "http 1.3.0", "http-body 1.0.1", "http-body-util", "hyper 1.6.0", "hyper-util", "log", - "rand 0.9.1", + "rand 0.9.0", "regex", "serde_json", "serde_urlencoded", @@ -6025,7 +6163,7 @@ dependencies = [ "smallvec", "tagptr", "thiserror 1.0.69", - "uuid 1.16.0", + "uuid 1.15.1", ] [[package]] @@ -6234,7 +6372,7 @@ dependencies = [ "futures", "genesis", "hex", - "igd-next 0.16.1", + "igd-next 0.16.0", "itertools 0.10.5", "k256 0.13.4", "kzg", @@ -6422,6 +6560,16 @@ dependencies = [ "libc", ] +[[package]] +name = "nybbles" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +dependencies = [ + "const-hex", + "smallvec", +] + [[package]] name = "object" version = "0.36.7" @@ -6442,9 +6590,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.3" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +checksum = "cde51589ab56b20a6f686b2c68f7a0bd6add753d697abf720d63f8db3ab7b1ad" [[package]] name = "oneshot_broadcast" @@ -6478,7 +6626,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" dependencies = [ "arrayvec", - "auto_impl 1.3.0", + "auto_impl 1.2.1", "bytes", "ethereum-types 0.14.1", "open-fastrlp-derive", @@ -6519,7 +6667,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -6530,18 +6678,18 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" -version = "300.5.0+3.5.0" +version = "300.4.2+3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ce546f549326b0e6052b649198487d91320875da901e7bd11a06d1ee3f9c2f" +checksum = "168ce4e058f975fe43e89d9ccf78ca668601887ae736090aacc23ae353c298e2" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.108" +version = "0.9.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" +checksum = "8288979acd84749c744a9014b4382d42b8f7b2592847b5afb2ed29e5d16ede07" dependencies = [ "cc", "libc", @@ -6596,7 +6744,7 @@ dependencies = [ "ecdsa 0.16.9", "elliptic-curve 0.13.8", "primeorder", - "sha2 0.10.9", + "sha2 0.10.8", ] [[package]] @@ -6659,7 +6807,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -6711,7 +6859,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.12", + "redox_syscall 0.5.10", "smallvec", "windows-targets 0.52.6", ] @@ -6739,7 +6887,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" dependencies = [ - "crypto-mac", + "crypto-mac 0.11.0", ] [[package]] @@ -6751,7 +6899,7 @@ dependencies = [ "digest 0.10.7", "hmac 0.12.1", "password-hash", - "sha2 0.10.9", + "sha2 0.10.8", ] [[package]] @@ -6781,9 +6929,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.8.0" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", "thiserror 2.0.12", @@ -6817,7 +6965,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -6848,7 +6996,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.10", + "der 0.7.9", "spki 0.7.3", ] @@ -6948,7 +7096,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy", + "zerocopy 0.8.23", ] [[package]] @@ -6987,12 +7135,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.32" +version = "0.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" +checksum = "f1ccf34da56fc294e7d4ccf69a85992b7dfb826b7cf57bac6a70bba3494cc08a" dependencies = [ "proc-macro2", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -7047,7 +7195,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ - "toml_edit 0.22.26", + "toml_edit 0.22.24", ] [[package]] @@ -7076,9 +7224,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -7132,7 +7280,7 @@ checksum = 
"440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -7163,7 +7311,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -7186,7 +7334,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -7219,7 +7367,7 @@ checksum = "5e617cc9058daa5e1fe5a0d23ed745773a5ee354111dad1ec0235b0cc16b6730" dependencies = [ "cfg-if", "darwin-libproc", - "derive_more 0.99.20", + "derive_more 0.99.19", "glob", "mach2", "nix 0.24.3", @@ -7270,48 +7418,46 @@ dependencies = [ [[package]] name = "quickcheck_macros" -version = "1.1.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f71ee38b42f8459a88d3362be6f9b841ad2d5421844f61eb1c59c11bff3ac14a" +checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 1.0.109", ] [[package]] name = "quinn" -version = "0.11.7" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", - "cfg_aliases", "futures-io", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", - "rustls 0.23.27", + "rustls 0.23.23", "socket2", "thiserror 2.0.12", "tokio", "tracing", - "web-time", ] [[package]] name = "quinn-proto" -version = "0.11.11" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcbafbbdbb0f638fe3f35f3c56739f77a8a1d070cb25603226c83339b391472b" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", - "getrandom 0.3.2", - "rand 
0.9.1", + "getrandom 0.2.15", + "rand 0.8.5", "ring", "rustc-hash 2.1.1", - "rustls 0.23.27", + "rustls 0.23.23", "rustls-pki-types", "slab", "thiserror 2.0.12", @@ -7322,9 +7468,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.12" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4e529991f949c5e25755532370b8af5d114acae52326361d68d47af64aa842" +checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" dependencies = [ "cfg_aliases", "libc", @@ -7336,19 +7482,13 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.40" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" dependencies = [ "proc-macro2", ] -[[package]] -name = "r-efi" -version = "5.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" - [[package]] name = "r2d2" version = "0.8.10" @@ -7397,17 +7537,18 @@ dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.4", - "serde", ] [[package]] name = "rand" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", + "serde", + "zerocopy 0.8.23", ] [[package]] @@ -7436,7 +7577,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.15", ] [[package]] @@ -7445,7 +7586,8 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.1", + "serde", ] [[package]] @@ -7492,9 +7634,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.5.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34bc6763177194266fc3773e2b2bb3693f7b02fdf461e285aa33202e3164b74e" +checksum = "ea0a72cd7140de9fc3e318823b883abf819c20d478ec89ce880466dc2ef263c6" dependencies = [ "libc", ] @@ -7510,9 +7652,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" dependencies = [ "bitflags 2.9.0", ] @@ -7523,7 +7665,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.15", "libredox", "thiserror 1.0.69", ] @@ -7636,9 +7778,13 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7c8f7f733062b66dc1c63f9db168ac0b97a9210e247fa90fdc9ad08f51b302" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error", +] [[package]] name = "rfc6979" @@ -7669,7 +7815,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.15", "libc", "untrusted", "windows-sys 0.52.0", @@ -7762,7 +7908,7 @@ dependencies = [ "primitive-types 0.12.2", "proptest", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.0", "rlp", "ruint-macro", "serde", @@ -7884,14 +8030,14 @@ dependencies = [ 
[[package]] name = "rustix" -version = "1.0.7" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" dependencies = [ "bitflags 2.9.0", "errno", "libc", - "linux-raw-sys 0.9.4", + "linux-raw-sys 0.9.2", "windows-sys 0.59.0", ] @@ -7923,14 +8069,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.2", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -7955,12 +8101,11 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" dependencies = [ "web-time", - "zeroize", ] [[package]] @@ -7984,17 +8129,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "rustls-webpki" -version = "0.103.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7149975849f1abb3832b246010ef62ccc80d3a76169517ada7188252b9cfb437" -dependencies = [ - "ring", - "rustls-pki-types", - "untrusted", -] - [[package]] name = "rustversion" version = "1.0.20" @@ -8082,7 +8216,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -8136,7 +8270,7 @@ dependencies = [ "hmac 0.12.1", "pbkdf2 0.11.0", "salsa20 0.10.2", - "sha2 0.10.9", + "sha2 0.10.8", ] [[package]] @@ -8170,7 +8304,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.10", + "der 0.7.9", "generic-array 0.14.7", "pkcs8 0.10.2", "subtle", @@ -8252,9 +8386,9 @@ dependencies = [ [[package]] name = "serde-aux" -version = "4.7.0" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "207f67b28fe90fb596503a9bf0bf1ea5e831e21307658e177c5dfcdfc3ab8a0a" +checksum = "5290c39c5f6992b9dddbda28541d965dba46468294e6018a408fa297e6c602de" dependencies = [ "serde", "serde-value", @@ -8289,7 +8423,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -8312,7 +8446,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -8333,7 +8467,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -8378,9 +8512,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.9" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -8436,9 +8570,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.5" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +checksum = 
"a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -8605,9 +8739,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.15.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" dependencies = [ "arbitrary", ] @@ -8631,15 +8765,15 @@ dependencies = [ "rand_core 0.6.4", "ring", "rustc_version 0.4.1", - "sha2 0.10.9", + "sha2 0.10.8", "subtle", ] [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -8668,14 +8802,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.10", + "der 0.7.9", ] [[package]] name = "ssz_types" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad0fa7e9a85c06d0a6ba5100d733fff72e231eb6db2d86078225cf716fd2d95" +checksum = "75b55bedc9a18ed2860a46d6beb4f4082416ee1d60be0cc364cebdcdddc7afd4" dependencies = [ "arbitrary", "ethereum_serde_utils", @@ -8849,9 +8983,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -8872,13 +9006,13 @@ checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" [[package]] name = 
"synstructure" -version = "0.13.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -8987,14 +9121,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.19.1" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" +checksum = "2c317e0a526ee6120d8dabad239c8dadca62b24b6f168914bbbc8e2fb1f0e567" dependencies = [ + "cfg-if", "fastrand", - "getrandom 0.3.2", + "getrandom 0.3.1", "once_cell", - "rustix 1.0.7", + "rustix 1.0.2", "windows-sys 0.59.0", ] @@ -9013,7 +9148,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" dependencies = [ - "rustix 1.0.7", + "rustix 1.0.2", "windows-sys 0.59.0", ] @@ -9057,7 +9192,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -9068,7 +9203,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -9123,9 +9258,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "dad298b01a40a23aac4580b67e3dbedb7cc8402f3592d7f49469de2ea4aecdd8" dependencies = [ "deranged", "itoa", @@ -9138,15 +9273,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "765c97a5b985b7c11d7bc27fa927dc4fe6af3a6dfb021d28deb60d3bf51e76ef" [[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "e8093bc3e81c3bc5f7879de09619d06c9a5a5e45ca44dfeeb7225bae38005c5c" dependencies = [ "num-conv", "time-core", @@ -9175,7 +9310,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand 0.8.5", "rustc-hash 1.1.0", - "sha2 0.10.9", + "sha2 0.10.8", "thiserror 1.0.69", "unicode-normalization", "wasm-bindgen", @@ -9228,9 +9363,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.0" +version = "1.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = "9975ea0f48b5aa3972bf2d888c238182458437cc2a19374b81b25cdf1023fb3a" dependencies = [ "backtrace", "bytes", @@ -9263,7 +9398,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -9311,9 +9446,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -9326,9 +9461,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.9" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +checksum = 
"0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" [[package]] name = "toml_edit" @@ -9336,20 +9471,20 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.8.0", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.26" +version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.8.0", "toml_datetime", - "winnow 0.7.10", + "winnow 0.7.3", ] [[package]] @@ -9363,8 +9498,8 @@ dependencies = [ "axum", "base64 0.22.1", "bytes", - "h2 0.4.10", - "http 1.3.1", + "h2 0.4.8", + "http 1.3.0", "http-body 1.0.1", "http-body-util", "hyper 1.6.0", @@ -9460,7 +9595,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -9527,9 +9662,9 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c58eb0f518840670270d90d97ffee702d8662d9c5494870c9e1e9e0fa00f668" +checksum = "ee44f4cef85f88b4dea21c0b1f58320bdf35715cf56d840969487cff00613321" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -9540,14 +9675,14 @@ dependencies = [ [[package]] name = "tree_hash_derive" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "699e7fb6b3fdfe0c809916f251cf5132d64966858601695c3736630a87e7166a" +checksum = "0bee2ea1551f90040ab0e34b6fb7f2fa3bad8acc925837ac654f2c78a13e3089" dependencies = [ - "darling 0.20.11", + "darling 0.20.10", "proc-macro2", "quote", - "syn 
2.0.101", + "syn 2.0.100", ] [[package]] @@ -9793,17 +9928,17 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.15", "serde", ] [[package]] name = "uuid" -version = "1.16.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.1", ] [[package]] @@ -10113,9 +10248,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" -version = "0.14.2+wasi-0.2.4" +version = "0.13.3+wasi-0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" dependencies = [ "wit-bindgen-rt", ] @@ -10142,7 +10277,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", "wasm-bindgen-shared", ] @@ -10177,7 +10312,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10295,9 +10430,9 @@ checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "widestring" -version = "1.2.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -10362,6 +10497,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "windows-core" +version = "0.52.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.53.0" @@ -10378,26 +10522,13 @@ version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ - "windows-implement 0.58.0", - "windows-interface 0.58.0", + "windows-implement", + "windows-interface", "windows-result 0.2.0", - "windows-strings 0.1.0", + "windows-strings", "windows-targets 0.52.6", ] -[[package]] -name = "windows-core" -version = "0.61.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" -dependencies = [ - "windows-implement 0.60.0", - "windows-interface 0.59.1", - "windows-link", - "windows-result 0.3.2", - "windows-strings 0.4.0", -] - [[package]] name = "windows-implement" version = "0.58.0" @@ -10406,18 +10537,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", -] - -[[package]] -name = "windows-implement" -version = "0.60.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -10428,25 +10548,14 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", -] - -[[package]] -name = "windows-interface" -version = "0.59.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", + "syn 2.0.100", ] 
[[package]] name = "windows-link" -version = "0.1.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" [[package]] name = "windows-result" @@ -10466,15 +10575,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-result" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-strings" version = "0.1.0" @@ -10485,15 +10585,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-strings" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -10719,9 +10810,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.7.10" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" dependencies = [ "memchr", ] @@ -10738,9 +10829,9 @@ dependencies = [ [[package]] name = "wit-bindgen-rt" -version = "0.39.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" dependencies = [ "bitflags 2.9.0", ] @@ -10844,9 +10935,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.26" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a62ce76d9b56901b19a74f19431b0d8b3bc7ca4ad685a746dfd78ca8f4fc6bda" +checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" [[package]] name = "xmltree" @@ -10928,28 +11019,48 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" +dependencies = [ + "zerocopy-derive 0.8.23", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", ] [[package]] @@ -10969,7 +11080,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", "synstructure", ] @@ -10991,7 +11102,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + 
"syn 2.0.100", ] [[package]] @@ -11013,7 +11124,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.100", ] [[package]] @@ -11051,7 +11162,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ - "zstd-safe 7.2.4", + "zstd-safe 7.2.3", ] [[package]] @@ -11066,18 +11177,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.4" +version = "7.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.15+zstd.1.5.7" +version = "2.0.14+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 6a7b2f610e..817c2f2d80 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -98,8 +98,8 @@ edition = "2021" [workspace.dependencies] account_utils = { path = "common/account_utils" } -alloy-consensus = "0.3.0" -alloy-primitives = { version = "0.8", features = ["rlp", "getrandom"] } +alloy-consensus = { version = "0.14.0", default-features = false } +alloy-primitives = { version = "1.0", features = ["rlp", "getrandom"] } alloy-rlp = "0.3.4" anyhow = "1" arbitrary = { version = "1", features = ["derive"] } @@ -116,7 +116,7 @@ byteorder = "1" bytes = "1" # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. 
-c-kzg = { version = "1", default-features = false } +c-kzg = { version = "2.1.0", default-features = false } cargo_metadata = "0.19" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } clap_utils = { path = "common/clap_utils" } @@ -143,9 +143,9 @@ eth2_keystore = { path = "crypto/eth2_keystore" } eth2_network_config = { path = "common/eth2_network_config" } eth2_wallet = { path = "crypto/eth2_wallet" } ethereum_hashing = "0.7.0" -ethereum_serde_utils = "0.7" -ethereum_ssz = "0.8.2" -ethereum_ssz_derive = "0.8.2" +ethereum_serde_utils = "0.8.0" +ethereum_ssz = "0.9.0" +ethereum_ssz_derive = "0.9.0" ethers-core = "1" ethers-middleware = { version = "1", default-features = false } ethers-providers = { version = "1", default-features = false } @@ -184,7 +184,7 @@ malloc_utils = { path = "common/malloc_utils" } maplit = "1" merkle_proof = { path = "consensus/merkle_proof" } metrics = { path = "common/metrics" } -milhouse = "0.5" +milhouse = "0.6" mockall = "0.13" mockall_double = "0.3" mockito = "1.5.0" @@ -230,7 +230,7 @@ slashing_protection = { path = "validator_client/slashing_protection" } slot_clock = { path = "common/slot_clock" } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" -ssz_types = "0.10" +ssz_types = "0.11.0" state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" } strum = { version = "0.24", features = ["derive"] } @@ -254,8 +254,8 @@ tracing-appender = "0.2" tracing-core = "0.1" tracing-log = "0.2" tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } -tree_hash = "0.9" -tree_hash_derive = "0.9" +tree_hash = "0.10.0" +tree_hash_derive = "0.10.0" types = { path = "consensus/types" } unused_port = { path = "common/unused_port" } url = "2" diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 569d73820c..de0e86489d 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ 
-661,12 +661,12 @@ mod tests { let attestation_data = size_of::(); let signature = size_of::(); - assert_eq!(aggregation_bits, 152); + assert_eq!(aggregation_bits, 144); assert_eq!(attestation_data, 128); assert_eq!(signature, 288 + 16); let attestation_expected = aggregation_bits + attestation_data + signature; - assert_eq!(attestation_expected, 584); + assert_eq!(attestation_expected, 576); assert_eq!( size_of::>(), attestation_expected @@ -684,13 +684,13 @@ mod tests { size_of::::MaxCommitteesPerSlot>>(); let signature = size_of::(); - assert_eq!(aggregation_bits, 152); - assert_eq!(committee_bits, 152); + assert_eq!(aggregation_bits, 144); + assert_eq!(committee_bits, 144); assert_eq!(attestation_data, 128); assert_eq!(signature, 288 + 16); let attestation_expected = aggregation_bits + committee_bits + attestation_data + signature; - assert_eq!(attestation_expected, 736); + assert_eq!(attestation_expected, 720); assert_eq!( size_of::>(), attestation_expected diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs index 234e624698..a8904741c0 100644 --- a/crypto/kzg/benches/benchmark.rs +++ b/crypto/kzg/benches/benchmark.rs @@ -1,6 +1,6 @@ use c_kzg::KzgSettings; use criterion::{criterion_group, criterion_main, Criterion}; -use kzg::{trusted_setup::get_trusted_setup, TrustedSetup}; +use kzg::{trusted_setup::get_trusted_setup, TrustedSetup, NO_PRECOMPUTE}; use rust_eth_kzg::{DASContext, TrustedSetup as PeerDASTrustedSetup}; pub fn bench_init_context(c: &mut Criterion) { @@ -25,8 +25,13 @@ pub fn bench_init_context(c: &mut Criterion) { serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); - KzgSettings::load_trusted_setup(&trusted_setup.g1_points(), &trusted_setup.g2_points()) - .unwrap() + KzgSettings::load_trusted_setup( + &trusted_setup.g1_monomial(), + &trusted_setup.g1_lagrange(), + &trusted_setup.g2_monomial(), + NO_PRECOMPUTE, + ) 
+ .unwrap() }) }); } diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 5d752cc0a5..11b133e4a3 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -21,6 +21,13 @@ pub use rust_eth_kzg::{ Cell, CellIndex as CellID, CellRef, TrustedSetup as PeerDASTrustedSetup, }; +/// Disables the fixed-base multi-scalar multiplication optimization for computing +/// cell KZG proofs, because `rust-eth-kzg` already handles the precomputation. +/// +/// Details about `precompute` parameter can be found here: +/// +pub const NO_PRECOMPUTE: u64 = 0; + // Note: `spec.number_of_columns` is a config and should match `CELLS_PER_EXT_BLOB` - however this // is a constant in the KZG library - be aware that overriding `number_of_columns` will break KZG // operations. @@ -65,8 +72,10 @@ impl Kzg { Ok(Self { trusted_setup: KzgSettings::load_trusted_setup( - &trusted_setup.g1_points(), - &trusted_setup.g2_points(), + &trusted_setup.g1_monomial(), + &trusted_setup.g1_lagrange(), + &trusted_setup.g2_monomial(), + NO_PRECOMPUTE, )?, context, }) @@ -85,8 +94,10 @@ impl Kzg { Ok(Self { trusted_setup: KzgSettings::load_trusted_setup( - &trusted_setup.g1_points(), - &trusted_setup.g2_points(), + &trusted_setup.g1_monomial(), + &trusted_setup.g1_lagrange(), + &trusted_setup.g2_monomial(), + NO_PRECOMPUTE, )?, context, }) @@ -111,8 +122,10 @@ impl Kzg { Ok(Self { trusted_setup: KzgSettings::load_trusted_setup( - &trusted_setup.g1_points(), - &trusted_setup.g2_points(), + &trusted_setup.g1_monomial(), + &trusted_setup.g1_lagrange(), + &trusted_setup.g2_monomial(), + NO_PRECOMPUTE, )?, context, }) @@ -128,7 +141,8 @@ impl Kzg { blob: &Blob, kzg_commitment: KzgCommitment, ) -> Result { - c_kzg::KzgProof::compute_blob_kzg_proof(blob, &kzg_commitment.into(), &self.trusted_setup) + self.trusted_setup + .compute_blob_kzg_proof(blob, &kzg_commitment.into()) .map(|proof| KzgProof(proof.to_bytes().into_inner())) .map_err(Into::into) } @@ -140,11 +154,10 @@ impl Kzg { kzg_commitment: 
KzgCommitment, kzg_proof: KzgProof, ) -> Result<(), Error> { - if !c_kzg::KzgProof::verify_blob_kzg_proof( + if !self.trusted_setup.verify_blob_kzg_proof( blob, &kzg_commitment.into(), &kzg_proof.into(), - &self.trusted_setup, )? { Err(Error::KzgVerificationFailed) } else { @@ -172,11 +185,10 @@ impl Kzg { .map(|proof| Bytes48::from(*proof)) .collect::>(); - if !c_kzg::KzgProof::verify_blob_kzg_proof_batch( + if !self.trusted_setup.verify_blob_kzg_proof_batch( blobs, &commitments_bytes, &proofs_bytes, - &self.trusted_setup, )? { Err(Error::KzgVerificationFailed) } else { @@ -186,7 +198,8 @@ impl Kzg { /// Converts a blob to a kzg commitment. pub fn blob_to_kzg_commitment(&self, blob: &Blob) -> Result { - c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, &self.trusted_setup) + self.trusted_setup + .blob_to_kzg_commitment(blob) .map(|commitment| KzgCommitment(commitment.to_bytes().into_inner())) .map_err(Into::into) } @@ -197,7 +210,8 @@ impl Kzg { blob: &Blob, z: &Bytes32, ) -> Result<(KzgProof, Bytes32), Error> { - c_kzg::KzgProof::compute_kzg_proof(blob, z, &self.trusted_setup) + self.trusted_setup + .compute_kzg_proof(blob, z) .map(|(proof, y)| (KzgProof(proof.to_bytes().into_inner()), y)) .map_err(Into::into) } @@ -210,14 +224,9 @@ impl Kzg { y: &Bytes32, kzg_proof: KzgProof, ) -> Result { - c_kzg::KzgProof::verify_kzg_proof( - &kzg_commitment.into(), - z, - y, - &kzg_proof.into(), - &self.trusted_setup, - ) - .map_err(Into::into) + self.trusted_setup + .verify_kzg_proof(&kzg_commitment.into(), z, y, &kzg_proof.into()) + .map_err(Into::into) } /// Computes the cells and associated proofs for a given `blob`. 
diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs index 7aaa1d9919..66ae914c54 100644 --- a/crypto/kzg/src/trusted_setup.rs +++ b/crypto/kzg/src/trusted_setup.rs @@ -1,10 +1,14 @@ use crate::PeerDASTrustedSetup; -use c_kzg::{BYTES_PER_G1_POINT, BYTES_PER_G2_POINT}; use serde::{ de::{self, Deserializer, Visitor}, Deserialize, Serialize, }; +// Number of bytes per G1 point. +const BYTES_PER_G1_POINT: usize = 48; +// Number of bytes per G2 point. +const BYTES_PER_G2_POINT: usize = 96; + pub const TRUSTED_SETUP_BYTES: &[u8] = include_bytes!("../trusted_setup.json"); pub fn get_trusted_setup() -> Vec { @@ -23,30 +27,31 @@ struct G2Point([u8; BYTES_PER_G2_POINT]); /// `c_kzg::KzgSettings` object. /// /// The serialize/deserialize implementations are written according to -/// the format specified in the the ethereum consensus specs trusted setup files. +/// the format specified in the ethereum consensus specs trusted setup files. /// /// See https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TrustedSetup { - #[serde(rename = "g1_monomial")] - g1_monomial_points: Vec, - #[serde(rename = "g1_lagrange")] - g1_points: Vec, - #[serde(rename = "g2_monomial")] - g2_points: Vec, + g1_monomial: Vec, + g1_lagrange: Vec, + g2_monomial: Vec, } impl TrustedSetup { - pub fn g1_points(&self) -> Vec<[u8; BYTES_PER_G1_POINT]> { - self.g1_points.iter().map(|p| p.0).collect() + pub fn g1_monomial(&self) -> Vec { + self.g1_monomial.iter().flat_map(|p| p.0).collect() } - pub fn g2_points(&self) -> Vec<[u8; BYTES_PER_G2_POINT]> { - self.g2_points.iter().map(|p| p.0).collect() + pub fn g1_lagrange(&self) -> Vec { + self.g1_lagrange.iter().flat_map(|p| p.0).collect() + } + + pub fn g2_monomial(&self) -> Vec { + self.g2_monomial.iter().flat_map(|p| p.0).collect() } pub fn g1_len(&self) -> usize { - self.g1_points.len() + self.g1_lagrange.len() 
} } @@ -54,17 +59,17 @@ impl From<&TrustedSetup> for PeerDASTrustedSetup { fn from(trusted_setup: &TrustedSetup) -> Self { Self { g1_monomial: trusted_setup - .g1_monomial_points + .g1_monomial .iter() .map(|g1_point| format!("0x{}", hex::encode(g1_point.0))) .collect::>(), g1_lagrange: trusted_setup - .g1_points + .g1_lagrange .iter() .map(|g1_point| format!("0x{}", hex::encode(g1_point.0))) .collect::>(), g2_monomial: trusted_setup - .g2_points + .g2_monomial .iter() .map(|g2_point| format!("0x{}", hex::encode(g2_point.0))) .collect::>(), diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 1c27ca7792..ae9f6c0cc6 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -25,7 +25,7 @@ use tracing::Level; use types::{Epoch, EthSpec, MinimalEthSpec}; const END_EPOCH: u64 = 16; -const GENESIS_DELAY: u64 = 32; +const GENESIS_DELAY: u64 = 38; const ALTAIR_FORK_EPOCH: u64 = 0; const BELLATRIX_FORK_EPOCH: u64 = 0; const CAPELLA_FORK_EPOCH: u64 = 0; diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 2d0cacd941..f60ce5fc09 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -21,7 +21,7 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; use types::{Epoch, EthSpec, MinimalEthSpec}; const END_EPOCH: u64 = 16; -const GENESIS_DELAY: u64 = 32; +const GENESIS_DELAY: u64 = 38; const ALTAIR_FORK_EPOCH: u64 = 0; const BELLATRIX_FORK_EPOCH: u64 = 0; const CAPELLA_FORK_EPOCH: u64 = 1; From 8e55684b066fc2bdd113d586ecf8947c3db5d371 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 9 Jul 2025 18:08:27 +1000 Subject: [PATCH 26/44] Reintroduce `--logfile` with deprecation warning (#7723) Reintroduce the `--logfile` flag with a deprecation warning so that it doesn't prevent nodes from starting. 
This is considered preferable to breaking node startups so that users fix the flag, even though it means the `--logfile` flag is completely ineffective. The flag was initially removed in: - https://github.com/sigp/lighthouse/pull/6339 --- beacon_node/src/config.rs | 2 +- lighthouse/src/main.rs | 15 +++++++++++++++ lighthouse/tests/beacon_node.rs | 7 +++++++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 3c6339c03e..f55b91d58c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -278,7 +278,7 @@ pub fn get_config( } if clap_utils::parse_optional::(cli_args, "eth1-cache-follow-distance")?.is_some() { - warn!("The eth1-purge-cache flag is deprecated"); + warn!("The eth1-cache-follow-distance flag is deprecated"); } // `--execution-endpoint` is required now. diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index bbd8f764e7..10168d026f 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -126,6 +126,16 @@ fn main() { .global(true) .display_order(0), ) + .arg( + Arg::new("logfile") + .long("logfile") + .value_name("PATH") + .help("DEPRECATED") + .action(ArgAction::Set) + .global(true) + .hide(true) + .display_order(0) + ) .arg( Arg::new("logfile-dir") .long("logfile-dir") @@ -701,6 +711,11 @@ fn run( // Allow Prometheus access to the version and commit of the Lighthouse build. metrics::expose_lighthouse_version(); + // DEPRECATED: can be removed in v7.2.0/v8.0.0. 
+ if clap_utils::parse_optional::(matches, "logfile")?.is_some() { + warn!("The --logfile flag is deprecated and replaced by --logfile-dir"); + } + #[cfg(all(feature = "modern", target_arch = "x86_64"))] if !std::is_x86_feature_detected!("adx") { tracing::warn!( diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7a5a83cb29..884e5eddeb 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2476,6 +2476,13 @@ fn logfile_format_flag() { ) }); } +// DEPRECATED but should not crash. +#[test] +fn deprecated_logfile() { + CommandLineTest::new() + .flag("logfile", Some("test.txt")) + .run_with_zero_port(); +} // DEPRECATED but should not crash. #[test] From 8b5ccacac9c05e447a41d615433f2d76d4e1ca08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 9 Jul 2025 15:26:51 +0100 Subject: [PATCH 27/44] Error from RPC `send_response` when request doesn't exist on the active inbound requests (#7663) Lighthouse is currently logging a lot of errors in the `RPC` behaviour whenever a response is received for a request_id that no longer exists in active_inbound_requests. This is likely due to a data race or timing issue (e.g., the peer disconnecting before the response is handled). This PR addresses that by removing the error logging from the RPC layer. Instead, RPC::send_response now simply returns an Err, shifting the responsibility to the main service. The main service can then determine whether the peer is still connected and only log an error if the peer remains connected. Thanks @ackintosh for helping debug! 
--- beacon_node/lighthouse_network/src/rpc/mod.rs | 74 ++++++++++++++----- .../lighthouse_network/src/service/mod.rs | 63 +++++----------- beacon_node/network/src/service.rs | 10 ++- 3 files changed, 81 insertions(+), 66 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 0619908bb6..9807387a17 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -17,7 +17,7 @@ use std::marker::PhantomData; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; -use tracing::{debug, error, instrument, trace}; +use tracing::{debug, instrument, trace}; use types::{EthSpec, ForkContext}; pub(crate) use handler::{HandlerErr, HandlerEvent}; @@ -98,6 +98,13 @@ pub struct InboundRequestId { substream_id: SubstreamId, } +// An active inbound request received via RPC. +struct ActiveInboundRequest { + pub peer_id: PeerId, + pub request_type: RequestType, + pub peer_disconnected: bool, +} + impl InboundRequestId { /// Creates an _unchecked_ [`InboundRequestId`]. /// @@ -150,7 +157,7 @@ pub struct RPC { /// Rate limiter for our own requests. outbound_request_limiter: SelfRateLimiter, /// Active inbound requests that are awaiting a response. - active_inbound_requests: HashMap)>, + active_inbound_requests: HashMap>, /// Queue of events to be processed. events: Vec>, fork_context: Arc, @@ -199,8 +206,7 @@ impl RPC { } /// Sends an RPC response. - /// - /// The peer must be connected for this to succeed. + /// Returns an `Err` if the request does not exist in the active inbound requests list. 
#[instrument(parent = None, level = "trace", fields(service = "libp2p_rpc"), @@ -209,14 +215,16 @@ impl RPC { )] pub fn send_response( &mut self, - peer_id: PeerId, request_id: InboundRequestId, response: RpcResponse, - ) { - let Some((_peer_id, request_type)) = self.active_inbound_requests.remove(&request_id) + ) -> Result<(), RpcResponse> { + let Some(ActiveInboundRequest { + peer_id, + request_type, + peer_disconnected, + }) = self.active_inbound_requests.remove(&request_id) else { - error!(%peer_id, ?request_id, %response, "Request not found in active_inbound_requests. Response not sent"); - return; + return Err(response); }; // Add the request back to active requests if the response is `Success` and requires stream @@ -224,11 +232,24 @@ impl RPC { if request_type.protocol().terminator().is_some() && matches!(response, RpcResponse::Success(_)) { - self.active_inbound_requests - .insert(request_id, (peer_id, request_type.clone())); + self.active_inbound_requests.insert( + request_id, + ActiveInboundRequest { + peer_id, + request_type: request_type.clone(), + peer_disconnected, + }, + ); + } + + if peer_disconnected { + trace!(%peer_id, ?request_id, %response, + "Discarding response, peer is no longer connected"); + return Ok(()); } self.send_response_inner(peer_id, request_type.protocol(), request_id, response); + Ok(()) } fn send_response_inner( @@ -425,9 +446,10 @@ where self.events.push(error_msg); } - self.active_inbound_requests.retain( - |_inbound_request_id, (request_peer_id, _request_type)| *request_peer_id != peer_id, - ); + self.active_inbound_requests + .values_mut() + .filter(|request| request.peer_id == peer_id) + .for_each(|request| request.peer_disconnected = true); if let Some(limiter) = self.response_limiter.as_mut() { limiter.peer_disconnected(peer_id); @@ -468,9 +490,17 @@ where .active_inbound_requests .iter() .filter( - |(_inbound_request_id, (request_peer_id, active_request_type))| { + |( + _inbound_request_id, + ActiveInboundRequest { + 
peer_id: request_peer_id, + request_type: active_request_type, + peer_disconnected, + }, + )| { *request_peer_id == peer_id && active_request_type.protocol() == request_type.protocol() + && !peer_disconnected }, ) .count() @@ -494,19 +524,25 @@ where } // Requests that are below the limit on the number of simultaneous requests are added to the active inbound requests. - self.active_inbound_requests - .insert(request_id, (peer_id, request_type.clone())); + self.active_inbound_requests.insert( + request_id, + ActiveInboundRequest { + peer_id, + request_type: request_type.clone(), + peer_disconnected: false, + }, + ); // If we received a Ping, we queue a Pong response. if let RequestType::Ping(_) = request_type { trace!(connection_id = %connection_id, %peer_id, "Received Ping, queueing Pong"); self.send_response( - peer_id, request_id, RpcResponse::Success(RpcSuccessResponse::Pong(Ping { data: self.seq_number, })), - ); + ) + .expect("Request to exist"); } self.events.push(ToSwarm::GenerateEvent(RPCMessage { diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index e2c6f24405..0f5745a3a2 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -11,8 +11,7 @@ use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY use crate::rpc::methods::MetadataRequest; use crate::rpc::{ GoodbyeReason, HandlerErr, InboundRequestId, NetworkParams, Protocol, RPCError, RPCMessage, - RPCReceived, RequestType, ResponseTermination, RpcErrorResponse, RpcResponse, - RpcSuccessResponse, RPC, + RPCReceived, RequestType, ResponseTermination, RpcResponse, RpcSuccessResponse, RPC, }; use crate::types::{ all_topics_at_fork, core_topics_to_subscribe, is_fork_non_core_topic, subnet_from_topic_hash, @@ -39,7 +38,7 @@ use std::path::PathBuf; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; -use tracing::{debug, info, instrument, trace, 
warn}; +use tracing::{debug, error, info, instrument, trace, warn}; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; @@ -1146,35 +1145,22 @@ impl Network { name = "libp2p", skip_all )] - pub fn send_response( + pub fn send_response>>( &mut self, peer_id: PeerId, inbound_request_id: InboundRequestId, - response: Response, + response: T, ) { - self.eth2_rpc_mut() - .send_response(peer_id, inbound_request_id, response.into()) - } - - /// Inform the peer that their request produced an error. - #[instrument(parent = None, - level = "trace", - fields(service = "libp2p"), - name = "libp2p", - skip_all - )] - pub fn send_error_response( - &mut self, - peer_id: PeerId, - inbound_request_id: InboundRequestId, - error: RpcErrorResponse, - reason: String, - ) { - self.eth2_rpc_mut().send_response( - peer_id, - inbound_request_id, - RpcResponse::Error(error, reason.into()), - ) + if let Err(response) = self + .eth2_rpc_mut() + .send_response(inbound_request_id, response.into()) + { + if self.network_globals.peers.read().is_connected(&peer_id) { + error!(%peer_id, ?inbound_request_id, %response, + "Request not found in RPC active requests" + ); + } + } } /* Peer management functions */ @@ -1460,19 +1446,6 @@ impl Network { name = "libp2p", skip_all )] - fn send_meta_data_response( - &mut self, - _req: MetadataRequest, - inbound_request_id: InboundRequestId, - peer_id: PeerId, - ) { - let metadata = self.network_globals.local_metadata.read().clone(); - // The encoder is responsible for sending the negotiated version of the metadata - let event = RpcResponse::Success(RpcSuccessResponse::MetaData(Arc::new(metadata))); - self.eth2_rpc_mut() - .send_response(peer_id, inbound_request_id, event); - } - // RPC Propagation methods /// Queues the response to be sent upwards as long at it was requested outside the Behaviour. 
#[must_use = "return the response"] @@ -1760,9 +1733,13 @@ impl Network { self.peer_manager_mut().ping_request(&peer_id, ping.data); None } - RequestType::MetaData(req) => { + RequestType::MetaData(_req) => { // send the requested meta-data - self.send_meta_data_response(req, inbound_request_id, peer_id); + let metadata = self.network_globals.local_metadata.read().clone(); + // The encoder is responsible for sending the negotiated version of the metadata + let response = + RpcResponse::Success(RpcSuccessResponse::MetaData(Arc::new(metadata))); + self.send_response(peer_id, inbound_request_id, response); None } RequestType::Goodbye(reason) => { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 0a6d515232..89f71dc367 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -11,6 +11,7 @@ use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; +use lighthouse_network::rpc::methods::RpcResponse; use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::RequestType; use lighthouse_network::service::Network; @@ -627,10 +628,11 @@ impl NetworkService { error, inbound_request_id, reason, - } => { - self.libp2p - .send_error_response(peer_id, inbound_request_id, error, reason); - } + } => self.libp2p.send_response( + peer_id, + inbound_request_id, + RpcResponse::Error(error, reason.into()), + ), NetworkMessage::ValidationResult { propagation_source, message_id, From cfb1f7331064b758c6786e4e1dc15507af5ff5d1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 10 Jul 2025 11:44:46 +1000 Subject: [PATCH 28/44] Release v7.1.0 (#7609) Post-Pectra release for tree-states hot :tada: Already merged to `release-v7.1.0`: - https://github.com/sigp/lighthouse/pull/7444 - https://github.com/sigp/lighthouse/pull/6750 - https://github.com/sigp/lighthouse/pull/7437 - https://github.com/sigp/lighthouse/pull/7133 - 
https://github.com/sigp/lighthouse/pull/7620 - https://github.com/sigp/lighthouse/pull/7663 --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5e22c9742a..8f4dec0da2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -904,7 +904,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.1.0-beta.0" +version = "7.1.0" dependencies = [ "account_utils", "beacon_chain", @@ -1179,7 +1179,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.1.0-beta.0" +version = "7.1.0" dependencies = [ "beacon_node", "bytes", @@ -4966,7 +4966,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.1.0-beta.0" +version = "7.1.0" dependencies = [ "account_utils", "beacon_chain", @@ -5480,7 +5480,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "7.1.0-beta.0" +version = "7.1.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 596419c33e..456376e79b 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.1.0-beta.0" +version = "7.1.0" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index b20708e7b0..238efd591a 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.1.0-beta.0-", - fallback = "Lighthouse/v7.1.0-beta.0" + prefix = "Lighthouse/v7.1.0-", + fallback = "Lighthouse/v7.1.0" ); 
/// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.1.0-beta.0" + "7.1.0" } /// Returns the name of the current client running. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index b39feb5011..a54c10dc68 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.1.0-beta.0" +version = "7.1.0" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index fdda1696b1..6a8fa00c1e 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.1.0-beta.0" +version = "7.1.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From 3826fe91f4e02a0c0a1f093e25f82e9801686a6b Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 11 Jul 2025 06:59:45 +1000 Subject: [PATCH 29/44] Improve data column KZG verification metric buckets (#7717) The current data column KZG verification buckets are not giving us useful info as the upper bound is too low. And we see most numbers above 70ms for batch verification, and we don't know how much time it really takes. This PR improves the buckets based on the numbers we got from testing. Exponential buckets seem like a good candidate here given we're expecting to increase blob count with a similar approach (possibly 2x each fork if it goes well). 
--- beacon_node/beacon_chain/src/metrics.rs | 36 ++++++++++++++----------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 5ca764821f..23d7a1542d 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1803,26 +1803,30 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock: "Runtime of batched kzg verification", ) }); +/// For reference on how the kzg data column verification buckets were set, here are some numbers for 48 blobs: +/// * 1 column batch: 5.76 ms +/// * 8 columns batch: 34.3 ms +/// * 64 columns batch: 257 ms +/// * 128 columns batch: 508 ms pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock> = + // 7 exponential buckets between 0.002 and 0.128 seconds, with more granularity on the lower end. LazyLock::new(|| { - try_create_histogram_with_buckets( - "beacon_kzg_verification_data_column_single_seconds", - "Runtime of single data column kzg verification", - Ok(vec![ - 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.005, 0.007, 0.01, 0.02, 0.05, - ]), - ) - }); + try_create_histogram_with_buckets( + "beacon_kzg_verification_data_column_single_seconds", + "Runtime of single data column kzg verification", + exponential_buckets(0.002, 2.0, 7), + ) + }); pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> = + // 10 exponential buckets between 0.002 and 1.024 seconds, with more + // granularity on the lower end. 
LazyLock::new(|| { - try_create_histogram_with_buckets( - "beacon_kzg_verification_data_column_batch_seconds", - "Runtime of batched data column kzg verification", - Ok(vec![ - 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.07, - ]), - ) - }); + try_create_histogram_with_buckets( + "beacon_kzg_verification_data_column_batch_seconds", + "Runtime of batched data column kzg verification", + exponential_buckets(0.002, 2.0, 10), + ) + }); pub static BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: LazyLock> = LazyLock::new( || { From b43e0b446cd1d8a3f5b3c4a9f614f950ef672aa8 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Thu, 10 Jul 2025 16:32:58 -0500 Subject: [PATCH 30/44] Final changes for `fusaka-devnet-2` (#7655) Closes #7467. This PR primarily addresses [the P2P changes](https://github.com/ethereum/EIPs/pull/9840) in [fusaka-devnet-2](https://fusaka-devnet-2.ethpandaops.io/). Specifically: * [the new `nfd` parameter added to the `ENR`](https://github.com/ethereum/EIPs/pull/9840) * [the modified `compute_fork_digest()` changes for every BPO fork](https://github.com/ethereum/EIPs/pull/9840) 90% of this PR was absolutely hacked together as fast as possible during the Berlinterop while running between Glamsterdam debates. Luckily, it seems to work. But I was unable to be as careful in avoiding bugs as I usually am. I've cleaned up the things *I remember* wanting to come back and have a closer look at. But still working on this. 
Progress: * [x] get it working on `fusaka-devnet-2` * [ ] [*optional* disconnect from peers with incorrect `nfd` at the fork boundary](https://github.com/ethereum/consensus-specs/pull/4407) - Can be addressed in a future PR if necessary * [x] first pass clean-up * [x] fix up all the broken tests * [x] final self-review * [x] more thorough review from people more familiar with affected code --- beacon_node/beacon_chain/src/beacon_chain.rs | 22 +- beacon_node/http_api/src/light_client.rs | 16 +- beacon_node/lighthouse_network/src/config.rs | 2 +- .../lighthouse_network/src/discovery/enr.rs | 31 +- .../lighthouse_network/src/discovery/mod.rs | 25 +- .../lighthouse_network/src/rpc/codec.rs | 393 +++++++++--------- .../lighthouse_network/src/rpc/handler.rs | 14 +- .../lighthouse_network/src/rpc/methods.rs | 23 +- .../lighthouse_network/src/rpc/protocol.rs | 35 +- .../src/rpc/rate_limiter.rs | 15 +- .../lighthouse_network/src/service/mod.rs | 48 ++- .../lighthouse_network/src/types/pubsub.rs | 169 ++++---- .../lighthouse_network/tests/common.rs | 53 +-- .../lighthouse_network/tests/rpc_tests.rs | 46 +- beacon_node/network/src/service.rs | 128 +++--- beacon_node/network/src/service/tests.rs | 9 +- beacon_node/network/src/sync/manager.rs | 2 +- .../network/src/sync/network_context.rs | 2 +- consensus/types/src/beacon_block.rs | 35 +- consensus/types/src/chain_spec.rs | 251 ++++++++--- consensus/types/src/enr_fork_id.rs | 4 + consensus/types/src/fork_context.rs | 274 ++++++++++-- consensus/types/src/fork_name.rs | 2 - consensus/types/src/signed_beacon_block.rs | 16 +- lcli/src/generate_bootnode_enr.rs | 5 +- .../tests/checkpoint-sync-config-devnet.yaml | 8 +- 26 files changed, 1047 insertions(+), 581 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9900535b2c..01075ae4a4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6812,17 +6812,27 
@@ impl BeaconChain { .enr_fork_id::(slot, self.genesis_validators_root) } - /// Calculates the `Duration` to the next fork if it exists and returns it - /// with it's corresponding `ForkName`. - pub fn duration_to_next_fork(&self) -> Option<(ForkName, Duration)> { + /// Returns the fork_digest corresponding to an epoch. + /// See [`ChainSpec::compute_fork_digest`] + pub fn compute_fork_digest(&self, epoch: Epoch) -> [u8; 4] { + self.spec + .compute_fork_digest(self.genesis_validators_root, epoch) + } + + /// Calculates the `Duration` to the next fork digest (this could be either a regular or BPO + /// hard fork) if it exists and returns it with its corresponding `Epoch`. + pub fn duration_to_next_digest(&self) -> Option<(Epoch, Duration)> { // If we are unable to read the slot clock we assume that it is prior to genesis and // therefore use the genesis slot. let slot = self.slot().unwrap_or(self.spec.genesis_slot); + let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + + let next_digest_epoch = self.spec.next_digest_epoch(epoch)?; + let next_digest_slot = next_digest_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let (fork_name, epoch) = self.spec.next_fork_epoch::(slot)?; self.slot_clock - .duration_to_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) - .map(|duration| (fork_name, duration)) + .duration_to_slot(next_digest_slot) + .map(|duration| (next_digest_epoch, duration)) } /// This method serves to get a sense of the current chain health. 
It is used in block proposal diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index 24b1338a72..f9559d738e 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -4,12 +4,12 @@ use crate::version::{ }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{ - self as api_types, ChainSpec, LightClientUpdate, LightClientUpdateResponseChunk, + self as api_types, LightClientUpdate, LightClientUpdateResponseChunk, LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, }; use ssz::Encode; use std::sync::Arc; -use types::{BeaconResponse, ForkName, Hash256, LightClientBootstrap}; +use types::{BeaconResponse, EthSpec, ForkName, Hash256, LightClientBootstrap}; use warp::{ hyper::{Body, Response}, reply::Reply, @@ -150,14 +150,10 @@ fn map_light_client_update_to_ssz_chunk( chain: &BeaconChain, light_client_update: &LightClientUpdate, ) -> LightClientUpdateResponseChunk { - let fork_name = chain - .spec - .fork_name_at_slot::(light_client_update.attested_header_slot()); - - let fork_digest = ChainSpec::compute_fork_digest( - chain.spec.fork_version_for_name(fork_name), - chain.genesis_validators_root, - ); + let epoch = light_client_update + .attested_header_slot() + .epoch(T::EthSpec::slots_per_epoch()); + let fork_digest = chain.compute_fork_digest(epoch); let payload = light_client_update.as_ssz_bytes(); let response_chunk_len = fork_digest.len() + payload.len(); diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index bd72a5d51a..aee53a469c 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -457,7 +457,7 @@ pub fn gossipsub_config( ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); - if fork_context.current_fork().altair_enabled() { + if fork_context.current_fork_name().altair_enabled() { let topic_len_bytes = 
topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 5628d5c463..4c05560497 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -3,6 +3,7 @@ pub use discv5::enr::CombinedKey; use super::enr_ext::CombinedKeyExt; +use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; use super::ENR_FILENAME; use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; @@ -18,10 +19,10 @@ use std::str::FromStr; use tracing::{debug, warn}; use types::{ChainSpec, EnrForkId, EthSpec}; -use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; - /// The ENR field specifying the fork id. pub const ETH2_ENR_KEY: &str = "eth2"; +/// The ENR field specifying the next fork digest. +pub const NEXT_FORK_DIGEST_ENR_KEY: &str = "nfd"; /// The ENR field specifying the attestation subnet bitfield. pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; /// The ENR field specifying the sync committee subnet bitfield. @@ -42,6 +43,9 @@ pub trait Eth2Enr { /// The peerdas custody group count associated with the ENR. fn custody_group_count(&self, spec: &ChainSpec) -> Result; + /// The next fork digest associated with the ENR. + fn next_fork_digest(&self) -> Result<[u8; 4], &'static str>; + fn eth2(&self) -> Result; } @@ -81,6 +85,12 @@ impl Eth2Enr for Enr { } } + fn next_fork_digest(&self) -> Result<[u8; 4], &'static str> { + self.get_decodable::<[u8; 4]>(NEXT_FORK_DIGEST_ENR_KEY) + .ok_or("ENR next fork digest non-existent")? 
+ .map_err(|_| "Could not decode the ENR next fork digest") + } + fn eth2(&self) -> Result { let eth2_bytes: Bytes = self .get_decodable(ETH2_ENR_KEY) @@ -149,13 +159,14 @@ pub fn build_or_load_enr( local_key: Keypair, config: &NetworkConfig, enr_fork_id: &EnrForkId, + next_fork_digest: [u8; 4], spec: &ChainSpec, ) -> Result { // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. let enr_key = CombinedKey::from_libp2p(local_key)?; - let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, spec)?; + let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, next_fork_digest, spec)?; use_or_load_enr(&enr_key, &mut local_enr, config)?; Ok(local_enr) @@ -166,6 +177,7 @@ pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, enr_fork_id: &EnrForkId, + next_fork_digest: [u8; 4], spec: &ChainSpec, ) -> Result { let mut builder = discv5::enr::Enr::builder(); @@ -257,7 +269,7 @@ pub fn build_enr( &bitfield.as_ssz_bytes().into(), ); - // only set `cgc` if PeerDAS fork epoch has been scheduled + // only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled if spec.is_peer_das_scheduled() { let custody_group_count = if let Some(false_cgc) = config.advertise_false_custody_group_count { @@ -268,6 +280,7 @@ pub fn build_enr( spec.custody_requirement }; builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count); + builder.add_value(NEXT_FORK_DIGEST_ENR_KEY, &next_fork_digest); } builder @@ -340,6 +353,7 @@ mod test { use types::{Epoch, MainnetEthSpec}; type E = MainnetEthSpec; + const TEST_NFD: [u8; 4] = [0x01, 0x02, 0x03, 0x04]; fn make_fulu_spec() -> ChainSpec { let mut spec = E::default_spec(); @@ -351,10 +365,17 @@ mod test { let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key = CombinedKey::from_secp256k1(&keypair); let enr_fork_id = EnrForkId::default(); - let enr = 
build_enr::(&enr_key, &config, &enr_fork_id, spec).unwrap(); + let enr = build_enr::(&enr_key, &config, &enr_fork_id, TEST_NFD, spec).unwrap(); (enr, enr_key) } + #[test] + fn test_nfd_enr_encoding() { + let spec = make_fulu_spec(); + let enr = build_enr_with_config(NetworkConfig::default(), &spec).0; + assert_eq!(enr.next_fork_digest().unwrap(), TEST_NFD); + } + #[test] fn custody_group_count_default() { let config = NetworkConfig { diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index ad4241c5b7..df866dfc64 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -49,7 +49,7 @@ use tracing::{debug, error, info, trace, warn}; use types::{ChainSpec, EnrForkId, EthSpec}; mod subnet_predicate; -use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY; +use crate::discovery::enr::{NEXT_FORK_DIGEST_ENR_KEY, PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY}; pub use subnet_predicate::subnet_predicate; use types::non_zero_usize::new_non_zero_usize; @@ -570,6 +570,19 @@ impl Discovery { Ok(()) } + pub fn update_enr_nfd(&mut self, nfd: [u8; 4]) -> Result<(), String> { + self.discv5 + .enr_insert::(NEXT_FORK_DIGEST_ENR_KEY, &nfd.as_ssz_bytes().into()) + .map_err(|e| format!("{:?}", e))?; + info!( + next_fork_digest = ?nfd, + "Updating the ENR nfd" + ); + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr()); + *self.network_globals.local_enr.write() = self.discv5.local_enr(); + Ok(()) + } + /// Updates the `eth2` field of our local ENR. 
pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId) { // to avoid having a reference to the spec constant, for the logging we assume @@ -1217,7 +1230,15 @@ mod tests { config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); let config = Arc::new(config); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); - let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default(), &spec).unwrap(); + let next_fork_digest = [0; 4]; + let enr: Enr = build_enr::( + &enr_key, + &config, + &EnrForkId::default(), + next_fork_digest, + &spec, + ) + .unwrap(); let globals = NetworkGlobals::new( enr, MetaData::V2(MetaDataV2 { diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index f638dd5615..d01b3b76ca 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -193,7 +193,7 @@ impl Decoder for SSZSnappyInboundCodec { handle_rpc_request( self.protocol.versioned_protocol, &decoded_buffer, - self.fork_context.current_fork(), + self.fork_context.current_fork_name(), &self.fork_context.spec, ) } @@ -469,65 +469,9 @@ fn context_bytes( // Add the context bytes if required if protocol.has_context_bytes() { if let RpcResponse::Success(rpc_variant) = resp { - match rpc_variant { - RpcSuccessResponse::BlocksByRange(ref_box_block) - | RpcSuccessResponse::BlocksByRoot(ref_box_block) => { - return match **ref_box_block { - // NOTE: If you are adding another fork type here, be sure to modify the - // `fork_context.to_context_bytes()` function to support it as well! - SignedBeaconBlock::Fulu { .. } => { - fork_context.to_context_bytes(ForkName::Fulu) - } - SignedBeaconBlock::Electra { .. } => { - fork_context.to_context_bytes(ForkName::Electra) - } - SignedBeaconBlock::Deneb { .. } => { - fork_context.to_context_bytes(ForkName::Deneb) - } - SignedBeaconBlock::Capella { .. 
} => { - fork_context.to_context_bytes(ForkName::Capella) - } - SignedBeaconBlock::Bellatrix { .. } => { - fork_context.to_context_bytes(ForkName::Bellatrix) - } - SignedBeaconBlock::Altair { .. } => { - fork_context.to_context_bytes(ForkName::Altair) - } - SignedBeaconBlock::Base { .. } => { - Some(fork_context.genesis_context_bytes()) - } - }; - } - RpcSuccessResponse::BlobsByRange(_) | RpcSuccessResponse::BlobsByRoot(_) => { - return fork_context.to_context_bytes(ForkName::Deneb); - } - RpcSuccessResponse::DataColumnsByRoot(_) - | RpcSuccessResponse::DataColumnsByRange(_) => { - return fork_context.to_context_bytes(ForkName::Fulu); - } - RpcSuccessResponse::LightClientBootstrap(lc_bootstrap) => { - return lc_bootstrap - .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); - } - RpcSuccessResponse::LightClientOptimisticUpdate(lc_optimistic_update) => { - return lc_optimistic_update - .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); - } - RpcSuccessResponse::LightClientFinalityUpdate(lc_finality_update) => { - return lc_finality_update - .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); - } - RpcSuccessResponse::LightClientUpdatesByRange(lc_update) => { - return lc_update - .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); - } - // These will not pass the has_context_bytes() check - RpcSuccessResponse::Status(_) - | RpcSuccessResponse::Pong(_) - | RpcSuccessResponse::MetaData(_) => { - return None; - } - } + return rpc_variant + .slot() + .map(|slot| fork_context.context_bytes(slot.epoch(E::slots_per_epoch()))); } } None @@ -938,7 +882,7 @@ fn context_bytes_to_fork_name( fork_context: Arc, ) -> Result { fork_context - .from_context_bytes(context_bytes) + .get_fork_from_context_bytes(context_bytes) .cloned() .ok_or_else(|| { let encoded = hex::encode(context_bytes); @@ -966,69 +910,88 @@ mod tests { type Spec = types::MainnetEthSpec; - fn fork_context(fork_name: 
ForkName) -> ForkContext { + fn spec_with_all_forks_enabled() -> ChainSpec { let mut chain_spec = Spec::default_spec(); - let altair_fork_epoch = Epoch::new(1); - let bellatrix_fork_epoch = Epoch::new(2); - let capella_fork_epoch = Epoch::new(3); - let deneb_fork_epoch = Epoch::new(4); - let electra_fork_epoch = Epoch::new(5); - let fulu_fork_epoch = Epoch::new(6); + chain_spec.altair_fork_epoch = Some(Epoch::new(1)); + chain_spec.bellatrix_fork_epoch = Some(Epoch::new(2)); + chain_spec.capella_fork_epoch = Some(Epoch::new(3)); + chain_spec.deneb_fork_epoch = Some(Epoch::new(4)); + chain_spec.electra_fork_epoch = Some(Epoch::new(5)); + chain_spec.fulu_fork_epoch = Some(Epoch::new(6)); - chain_spec.altair_fork_epoch = Some(altair_fork_epoch); - chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - chain_spec.capella_fork_epoch = Some(capella_fork_epoch); - chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); - chain_spec.electra_fork_epoch = Some(electra_fork_epoch); - chain_spec.fulu_fork_epoch = Some(fulu_fork_epoch); + // check that we have all forks covered + assert!(chain_spec.fork_epoch(ForkName::latest()).is_some()); + chain_spec + } - let current_slot = match fork_name { - ForkName::Base => Slot::new(0), - ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Bellatrix => bellatrix_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Electra => electra_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Fulu => fulu_fork_epoch.start_slot(Spec::slots_per_epoch()), + fn fork_context(fork_name: ForkName, spec: &ChainSpec) -> ForkContext { + let current_epoch = match fork_name { + ForkName::Base => Some(Epoch::new(0)), + ForkName::Altair => spec.altair_fork_epoch, + ForkName::Bellatrix => spec.bellatrix_fork_epoch, + ForkName::Capella => 
spec.capella_fork_epoch, + ForkName::Deneb => spec.deneb_fork_epoch, + ForkName::Electra => spec.electra_fork_epoch, + ForkName::Fulu => spec.fulu_fork_epoch, }; - ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) + let current_slot = current_epoch.unwrap().start_slot(Spec::slots_per_epoch()); + ForkContext::new::(current_slot, Hash256::zero(), spec) } /// Smallest sized block across all current forks. Useful for testing /// min length check conditions. - fn empty_base_block() -> SignedBeaconBlock { - let empty_block = BeaconBlock::Base(BeaconBlockBase::::empty(&Spec::default_spec())); + fn empty_base_block(spec: &ChainSpec) -> SignedBeaconBlock { + let empty_block = BeaconBlock::Base(BeaconBlockBase::::empty(spec)); SignedBeaconBlock::from_block(empty_block, Signature::empty()) } - fn altair_block() -> SignedBeaconBlock { - let full_block = - BeaconBlock::Altair(BeaconBlockAltair::::full(&Spec::default_spec())); + fn altair_block(spec: &ChainSpec) -> SignedBeaconBlock { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. + let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(spec)); SignedBeaconBlock::from_block(full_block, Signature::empty()) } - fn empty_blob_sidecar() -> Arc> { - Arc::new(BlobSidecar::empty()) + fn empty_blob_sidecar(spec: &ChainSpec) -> Arc> { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. + let mut blob_sidecar = BlobSidecar::::empty(); + blob_sidecar.signed_block_header.message.slot = spec + .deneb_fork_epoch + .expect("deneb fork epoch must be set") + .start_slot(Spec::slots_per_epoch()); + Arc::new(blob_sidecar) } - fn empty_data_column_sidecar() -> Arc> { - Arc::new(DataColumnSidecar { + fn empty_data_column_sidecar(spec: &ChainSpec) -> Arc> { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. 
+ let data_column_sidecar = DataColumnSidecar { index: 0, column: VariableList::new(vec![Cell::::default()]).unwrap(), kzg_commitments: VariableList::new(vec![KzgCommitment::empty_for_testing()]).unwrap(), kzg_proofs: VariableList::new(vec![KzgProof::empty()]).unwrap(), signed_block_header: SignedBeaconBlockHeader { - message: BeaconBlockHeader::empty(), + message: BeaconBlockHeader { + slot: spec + .fulu_fork_epoch + .expect("fulu fork epoch must be set") + .start_slot(Spec::slots_per_epoch()), + ..BeaconBlockHeader::empty() + }, signature: Signature::empty(), }, kzg_commitments_inclusion_proof: Default::default(), - }) + }; + Arc::new(data_column_sidecar) } /// Bellatrix block with length < max_rpc_size. fn bellatrix_block_small(spec: &ChainSpec) -> SignedBeaconBlock { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. let mut block: BeaconBlockBellatrix<_, FullPayload> = - BeaconBlockBellatrix::empty(&Spec::default_spec()); + BeaconBlockBellatrix::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); @@ -1044,8 +1007,10 @@ mod tests { /// The max limit for a Bellatrix block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a Bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. fn bellatrix_block_large(spec: &ChainSpec) -> SignedBeaconBlock { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. 
let mut block: BeaconBlockBellatrix<_, FullPayload> = - BeaconBlockBellatrix::empty(&Spec::default_spec()); + BeaconBlockBellatrix::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); @@ -1101,7 +1066,7 @@ mod tests { } } - fn dcbroot_request(spec: &ChainSpec, fork_name: ForkName) -> DataColumnsByRootRequest { + fn dcbroot_request(fork_name: ForkName, spec: &ChainSpec) -> DataColumnsByRootRequest { let number_of_columns = spec.number_of_columns as usize; DataColumnsByRootRequest { data_column_ids: RuntimeVariableList::new( @@ -1115,21 +1080,21 @@ mod tests { } } - fn bbroot_request_v1(fork_name: ForkName) -> BlocksByRootRequest { - BlocksByRootRequest::new_v1(vec![Hash256::zero()], &fork_context(fork_name)) + fn bbroot_request_v1(fork_name: ForkName, spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new_v1(vec![Hash256::zero()], &fork_context(fork_name, spec)) } - fn bbroot_request_v2(fork_name: ForkName) -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![Hash256::zero()], &fork_context(fork_name)) + fn bbroot_request_v2(fork_name: ForkName, spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![Hash256::zero()], &fork_context(fork_name, spec)) } - fn blbroot_request(fork_name: ForkName) -> BlobsByRootRequest { + fn blbroot_request(fork_name: ForkName, spec: &ChainSpec) -> BlobsByRootRequest { BlobsByRootRequest::new( vec![BlobIdentifier { block_root: Hash256::zero(), index: 0, }], - &fork_context(fork_name), + &fork_context(fork_name, spec), ) } @@ -1172,7 +1137,7 @@ mod tests { spec: &ChainSpec, ) -> Result { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context(fork_name)); + let fork_context = Arc::new(fork_context(fork_name, spec)); let max_packet_size = spec.max_payload_size as usize; let mut buf = BytesMut::new(); @@ -1186,12 +1151,13 @@ mod tests { fn 
encode_without_length_checks( bytes: Vec, fork_name: ForkName, + spec: &ChainSpec, ) -> Result { - let fork_context = fork_context(fork_name); + let fork_context = fork_context(fork_name, spec); let mut dst = BytesMut::new(); // Add context bytes if required - dst.extend_from_slice(&fork_context.to_context_bytes(fork_name).unwrap()); + dst.extend_from_slice(&fork_context.context_bytes(fork_context.current_fork_epoch())); let mut uvi_codec: Uvi = Uvi::default(); @@ -1219,7 +1185,7 @@ mod tests { spec: &ChainSpec, ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context(fork_name)); + let fork_context = Arc::new(fork_context(fork_name, spec)); let max_packet_size = spec.max_payload_size as usize; let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); @@ -1240,7 +1206,7 @@ mod tests { /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { - let fork_context = Arc::new(fork_context(fork_name)); + let fork_context = Arc::new(fork_context(fork_name, spec)); let max_packet_size = spec.max_payload_size as usize; let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); // Encode a request we send @@ -1311,7 +1277,7 @@ mod tests { // Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v1() { - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); assert_eq!( encode_then_decode_response( @@ -1348,13 +1314,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Base, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); @@ -1363,7 +1329,7 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - altair_block() + altair_block(&chain_spec) ))), ForkName::Altair, &chain_spec, @@ -1378,13 +1344,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRootV1, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Base, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); @@ -1392,9 +1358,9 @@ mod tests { matches!( encode_then_decode_response( SupportedProtocol::BlocksByRootV1, - RpcResponse::Success(RpcSuccessResponse::BlocksByRoot( - Arc::new(altair_block()) - )), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block( + &chain_spec + )))), ForkName::Altair, &chain_spec, ) @@ -1439,74 +1405,98 @@ mod tests { assert_eq!( 
encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + ))), ForkName::Deneb, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + ))), ForkName::Electra, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + ))), ForkName::Fulu, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + ))), ForkName::Deneb, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + ))), 
ForkName::Electra, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + ))), ForkName::Fulu, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Deneb, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1514,13 +1504,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Electra, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1528,13 +1518,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Fulu, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1542,13 +1532,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + 
empty_data_column_sidecar(&chain_spec) )), ForkName::Deneb, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1556,13 +1546,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Electra, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1570,13 +1560,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Fulu, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); } @@ -1584,19 +1574,19 @@ mod tests { // Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v2() { - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Base, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); @@ -1607,25 +1597,27 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Altair, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); assert_eq!( encode_then_decode_response( 
SupportedProtocol::BlocksByRangeV2, - RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(altair_block( + &chain_spec + )))), ForkName::Altair, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( - altair_block() + altair_block(&chain_spec) )))) ); @@ -1646,9 +1638,12 @@ mod tests { )))) ); - let mut encoded = - encode_without_length_checks(bellatrix_block_large.as_ssz_bytes(), ForkName::Bellatrix) - .unwrap(); + let mut encoded = encode_without_length_checks( + bellatrix_block_large.as_ssz_bytes(), + ForkName::Bellatrix, + &chain_spec, + ) + .unwrap(); assert!( matches!( @@ -1668,13 +1663,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Base, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))), ); @@ -1685,25 +1680,27 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Altair, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block( + &chain_spec + )))), ForkName::Altair, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( - altair_block() + altair_block(&chain_spec) )))) ); @@ -1721,9 +1718,12 @@ mod tests { )))) ); - let mut encoded = - encode_without_length_checks(bellatrix_block_large.as_ssz_bytes(), ForkName::Bellatrix) - .unwrap(); + let 
mut encoded = encode_without_length_checks( + bellatrix_block_large.as_ssz_bytes(), + ForkName::Bellatrix, + &chain_spec, + ) + .unwrap(); assert!( matches!( @@ -1785,15 +1785,14 @@ mod tests { // Test RPCResponse encoding/decoding for V2 messages #[test] fn test_context_bytes_v2() { - let fork_context = fork_context(ForkName::Altair); - - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = fork_context(ForkName::Altair, &chain_spec); // Removing context bytes for v2 messages should error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Base, &chain_spec, @@ -1816,7 +1815,7 @@ mod tests { let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Base, &chain_spec, @@ -1840,7 +1839,7 @@ mod tests { let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Altair, &chain_spec, @@ -1848,8 +1847,8 @@ mod tests { .unwrap(); let mut wrong_fork_bytes = BytesMut::new(); - wrong_fork_bytes - .extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + let altair_epoch = chain_spec.altair_fork_epoch.unwrap(); + wrong_fork_bytes.extend_from_slice(&fork_context.context_bytes(altair_epoch)); wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( @@ -1866,14 +1865,18 @@ mod tests { // Trying to decode an altair block with base context bytes should give ssz decoding error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - 
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block( + &chain_spec, + )))), ForkName::Altair, &chain_spec, ) .unwrap(); let mut wrong_fork_bytes = BytesMut::new(); - wrong_fork_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Base).unwrap()); + wrong_fork_bytes.extend_from_slice( + &fork_context.context_bytes(chain_spec.genesis_slot.epoch(Spec::slots_per_epoch())), + ); wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( @@ -1889,7 +1892,7 @@ mod tests { // Adding context bytes to Protocols that don't require it should return an error let mut encoded_bytes = BytesMut::new(); - encoded_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + encoded_bytes.extend_from_slice(&fork_context.context_bytes(altair_epoch)); encoded_bytes.extend_from_slice( &encode_response( SupportedProtocol::MetaDataV2, @@ -1912,7 +1915,7 @@ mod tests { let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Altair, &chain_spec, @@ -1938,7 +1941,7 @@ mod tests { let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Altair, &chain_spec, @@ -1960,8 +1963,7 @@ mod tests { #[test] fn test_encode_then_decode_request() { - let fork_context = fork_context(ForkName::Electra); - let chain_spec = fork_context.spec.clone(); + let chain_spec = spec_with_all_forks_enabled(); let requests: &[RequestType] = &[ RequestType::Ping(ping_message()), @@ -1985,10 +1987,10 @@ mod tests { // Handled separately to have consistent `ForkName` across request and responses let fork_dependent_requests = |fork_name| { [ - 
RequestType::BlobsByRoot(blbroot_request(fork_name)), - RequestType::BlocksByRoot(bbroot_request_v1(fork_name)), - RequestType::BlocksByRoot(bbroot_request_v2(fork_name)), - RequestType::DataColumnsByRoot(dcbroot_request(&chain_spec, fork_name)), + RequestType::BlobsByRoot(blbroot_request(fork_name, &chain_spec)), + RequestType::BlocksByRoot(bbroot_request_v1(fork_name, &chain_spec)), + RequestType::BlocksByRoot(bbroot_request_v2(fork_name, &chain_spec)), + RequestType::DataColumnsByRoot(dcbroot_request(fork_name, &chain_spec)), ] }; for fork_name in ForkName::list_all() { @@ -2048,7 +2050,7 @@ mod tests { assert_eq!(writer.get_ref().len(), 42); dst.extend_from_slice(writer.get_ref()); - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. assert!(matches!( decode_response( @@ -2066,7 +2068,8 @@ mod tests { /// sends a valid message filled with a stream of useless padding before the actual message. #[test] fn test_decode_malicious_v2_message() { - let fork_context = Arc::new(fork_context(ForkName::Altair)); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = Arc::new(fork_context(ForkName::Altair, &chain_spec)); // 10 byte snappy stream identifier let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY"; @@ -2078,7 +2081,7 @@ mod tests { let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00"; // Full altair block is 157916 bytes uncompressed. `max_compressed_len` is 32 + 157916 + 157916/6 = 184267. 
- let block_message_bytes = altair_block().as_ssz_bytes(); + let block_message_bytes = altair_block(&fork_context.spec).as_ssz_bytes(); assert_eq!(block_message_bytes.len(), 157916); assert_eq!( @@ -2090,7 +2093,8 @@ mod tests { let mut dst = BytesMut::with_capacity(1024); // Insert context bytes - dst.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + let altair_epoch = fork_context.spec.altair_fork_epoch.unwrap(); + dst.extend_from_slice(&fork_context.context_bytes(altair_epoch)); // Insert length-prefix uvi_codec @@ -2105,14 +2109,14 @@ mod tests { dst.extend_from_slice(malicious_padding); } - // Insert payload (8103 bytes compressed) + // Insert payload (8102 bytes compressed) let mut writer = FrameEncoder::new(Vec::new()); writer.write_all(&block_message_bytes).unwrap(); writer.flush().unwrap(); - assert_eq!(writer.get_ref().len(), 8103); + assert_eq!(writer.get_ref().len(), 8102); dst.extend_from_slice(writer.get_ref()); - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
assert!(matches!( @@ -2148,7 +2152,7 @@ mod tests { let mut uvi_codec: Uvi = Uvi::default(); let mut dst = BytesMut::with_capacity(1024); - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); // Insert length-prefix uvi_codec @@ -2184,9 +2188,8 @@ mod tests { let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context(ForkName::Base)); - - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = Arc::new(fork_context(ForkName::Base, &chain_spec)); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, @@ -2220,9 +2223,8 @@ mod tests { let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context(ForkName::Base)); - - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = Arc::new(fork_context(ForkName::Base, &chain_spec)); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, @@ -2251,9 +2253,8 @@ mod tests { let protocol_id = ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy); // Response limits - let fork_context = Arc::new(fork_context(ForkName::Base)); - - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = Arc::new(fork_context(ForkName::Base, &chain_spec)); let max_rpc_size = chain_spec.max_payload_size as usize; let limit = protocol_id.rpc_response_limits::(&fork_context); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 396d390b00..fe7be93662 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -912,7 +912,7 @@ where } let (req, substream) = substream; - let current_fork = 
self.fork_context.current_fork(); + let current_fork = self.fork_context.current_fork_name(); let spec = &self.fork_context.spec; match &req { @@ -950,8 +950,10 @@ where _ => {} }; - let max_responses = - req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); + let max_responses = req.max_responses( + self.fork_context.current_fork_epoch(), + &self.fork_context.spec, + ); // store requests that expect responses if max_responses > 0 { @@ -1021,8 +1023,10 @@ where } // add the stream to substreams if we expect a response, otherwise drop the stream. - let max_responses = - request.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); + let max_responses = request.max_responses( + self.fork_context.current_fork_epoch(), + &self.fork_context.spec, + ); if max_responses > 0 { let max_remaining_chunks = if request.expect_exactly_one_response() { // Currently enforced only for multiple responses diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 74cfc6d198..5300544821 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -484,7 +484,7 @@ impl BlocksByRootRequest { pub fn new(block_roots: Vec, fork_context: &ForkContext) -> Self { let max_request_blocks = fork_context .spec - .max_request_blocks(fork_context.current_fork()); + .max_request_blocks(fork_context.current_fork_name()); let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks); Self::V2(BlocksByRootRequestV2 { block_roots }) } @@ -492,7 +492,7 @@ impl BlocksByRootRequest { pub fn new_v1(block_roots: Vec, fork_context: &ForkContext) -> Self { let max_request_blocks = fork_context .spec - .max_request_blocks(fork_context.current_fork()); + .max_request_blocks(fork_context.current_fork_name()); let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks); Self::V1(BlocksByRootRequestV1 { 
block_roots }) } @@ -509,7 +509,7 @@ impl BlobsByRootRequest { pub fn new(blob_ids: Vec, fork_context: &ForkContext) -> Self { let max_request_blob_sidecars = fork_context .spec - .max_request_blob_sidecars(fork_context.current_fork()); + .max_request_blob_sidecars(fork_context.current_fork_name()); let blob_ids = RuntimeVariableList::from_vec(blob_ids, max_request_blob_sidecars); Self { blob_ids } } @@ -749,6 +749,23 @@ impl RpcSuccessResponse { RpcSuccessResponse::LightClientUpdatesByRange(_) => Protocol::LightClientUpdatesByRange, } } + + pub fn slot(&self) -> Option { + match self { + Self::BlocksByRange(r) | Self::BlocksByRoot(r) => Some(r.slot()), + Self::BlobsByRange(r) | Self::BlobsByRoot(r) => { + Some(r.signed_block_header.message.slot) + } + Self::DataColumnsByRange(r) | Self::DataColumnsByRoot(r) => { + Some(r.signed_block_header.message.slot) + } + Self::LightClientBootstrap(r) => Some(r.get_slot()), + Self::LightClientFinalityUpdate(r) => Some(r.get_attested_header_slot()), + Self::LightClientOptimisticUpdate(r) => Some(r.get_slot()), + Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()), + Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, + } + } } impl std::fmt::Display for RpcErrorResponse { diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 8f613dcbf9..500e98d5c3 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -18,10 +18,10 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecar, - EmptyBlock, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, + EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, 
LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -545,15 +545,15 @@ impl ProtocolId { ::ssz_fixed_len(), ), Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response - Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), - Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), + Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork_name()), + Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork_name()), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), Protocol::DataColumnsByRoot => { - rpc_data_column_limits::(fork_context.current_fork(), &fork_context.spec) + rpc_data_column_limits::(fork_context.current_fork_epoch(), &fork_context.spec) } Protocol::DataColumnsByRange => { - rpc_data_column_limits::(fork_context.current_fork(), &fork_context.spec) + rpc_data_column_limits::(fork_context.current_fork_epoch(), &fork_context.spec) } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), @@ -564,16 +564,16 @@ impl ProtocolId { as Encode>::ssz_fixed_len(), ), Protocol::LightClientBootstrap => { - rpc_light_client_bootstrap_limits_by_fork(fork_context.current_fork()) + rpc_light_client_bootstrap_limits_by_fork(fork_context.current_fork_name()) } Protocol::LightClientOptimisticUpdate => { - rpc_light_client_optimistic_update_limits_by_fork(fork_context.current_fork()) + rpc_light_client_optimistic_update_limits_by_fork(fork_context.current_fork_name()) } Protocol::LightClientFinalityUpdate => { - rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork()) + rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork_name()) } Protocol::LightClientUpdatesByRange => { - 
rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork()) + rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork_name()) } } } @@ -635,11 +635,13 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -pub fn rpc_data_column_limits(fork_name: ForkName, spec: &ChainSpec) -> RpcLimits { +pub fn rpc_data_column_limits( + current_digest_epoch: Epoch, + spec: &ChainSpec, +) -> RpcLimits { RpcLimits::new( DataColumnSidecar::::min_size(), - // TODO(EIP-7892): fix this once we change fork-version on BPO forks - DataColumnSidecar::::max_size(spec.max_blobs_per_block_within_fork(fork_name) as usize), + DataColumnSidecar::::max_size(spec.max_blobs_per_block(current_digest_epoch) as usize), ) } @@ -738,16 +740,13 @@ impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. - /// TODO(EIP-7892): refactor this to remove `_current_fork` - pub fn max_responses(&self, _current_fork: ForkName, spec: &ChainSpec) -> u64 { + pub fn max_responses(&self, digest_epoch: Epoch, spec: &ChainSpec) -> u64 { match self { RequestType::Status(_) => 1, RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => { - req.max_blobs_requested(Slot::new(req.start_slot).epoch(E::slots_per_epoch()), spec) - } + RequestType::BlobsByRange(req) => req.max_blobs_requested(digest_epoch, spec), RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 6e66999612..f8fd54eb2a 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -13,7 
+13,7 @@ use std::sync::Arc; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use tokio::time::Interval; -use types::{ChainSpec, EthSpec, ForkContext, ForkName}; +use types::{ChainSpec, Epoch, EthSpec, ForkContext}; /// Nanoseconds since a given time. // Maintained as u64 to reduce footprint @@ -267,7 +267,7 @@ impl RPCRateLimiterBuilder { pub trait RateLimiterItem { fn protocol(&self) -> Protocol; - fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64; + fn max_responses(&self, digest_epoch: Epoch, spec: &ChainSpec) -> u64; } impl RateLimiterItem for super::RequestType { @@ -275,8 +275,8 @@ impl RateLimiterItem for super::RequestType { self.versioned_protocol().protocol() } - fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { - self.max_responses(current_fork, spec) + fn max_responses(&self, digest_epoch: Epoch, spec: &ChainSpec) -> u64 { + self.max_responses(digest_epoch, spec) } } @@ -285,7 +285,7 @@ impl RateLimiterItem for (super::RpcResponse, Protocol) { self.1 } - fn max_responses(&self, _current_fork: ForkName, _spec: &ChainSpec) -> u64 { + fn max_responses(&self, _digest_epoch: Epoch, _spec: &ChainSpec) -> u64 { // A response chunk consumes one token of the rate limiter. 
1 } @@ -353,7 +353,10 @@ impl RPCRateLimiter { ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); let tokens = request - .max_responses(self.fork_context.current_fork(), &self.fork_context.spec) + .max_responses( + self.fork_context.current_fork_epoch(), + &self.fork_context.spec, + ) .max(1); let check = diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 0f5745a3a2..a880fdb3e7 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -193,10 +193,15 @@ impl Network { // set up a collection of variables accessible outside of the network crate // Create an ENR or load from disk if appropriate + let next_fork_digest = ctx + .fork_context + .next_fork_digest() + .unwrap_or_else(|| ctx.fork_context.current_fork_digest()); let enr = crate::discovery::enr::build_or_load_enr::( local_keypair.clone(), &config, &ctx.enr_fork_id, + next_fork_digest, &ctx.chain_spec, )?; @@ -280,27 +285,26 @@ impl Network { // Set up a scoring update interval let update_gossipsub_scores = tokio::time::interval(params.decay_interval); - let current_and_future_forks = ForkName::list_all().into_iter().filter_map(|fork| { - if fork >= ctx.fork_context.current_fork() { - ctx.fork_context - .to_context_bytes(fork) - .map(|fork_digest| (fork, fork_digest)) - } else { - None - } - }); + let current_digest_epoch = ctx.fork_context.current_fork_epoch(); + let current_and_future_digests = + ctx.chain_spec + .all_digest_epochs() + .filter_map(|digest_epoch| { + if digest_epoch >= current_digest_epoch { + Some((digest_epoch, ctx.fork_context.context_bytes(digest_epoch))) + } else { + None + } + }); - let all_topics_for_forks = current_and_future_forks - .map(|(fork, fork_digest)| { + let all_topics_for_digests = current_and_future_digests + .map(|(epoch, digest)| { + let fork = ctx.chain_spec.fork_name_at_epoch(epoch); 
all_topics_at_fork::(fork, &ctx.chain_spec) .into_iter() .map(|topic| { - Topic::new(GossipTopic::new( - topic, - GossipEncoding::default(), - fork_digest, - )) - .into() + Topic::new(GossipTopic::new(topic, GossipEncoding::default(), digest)) + .into() }) .collect::>() }) @@ -308,7 +312,7 @@ impl Network { // For simplicity find the fork with the most individual topics and assume all forks // have the same topic count - let max_topics_at_any_fork = all_topics_for_forks + let max_topics_at_any_fork = all_topics_for_digests .iter() .map(|topics| topics.len()) .max() @@ -359,7 +363,7 @@ impl Network { // If we are using metrics, then register which topics we want to make sure to keep // track of if ctx.libp2p_registry.is_some() { - for topics in all_topics_for_forks { + for topics in all_topics_for_digests { gossipsub.register_topics_for_metrics(topics); } } @@ -1347,6 +1351,12 @@ impl Network { self.enr_fork_id = enr_fork_id; } + pub fn update_nfd(&mut self, nfd: [u8; 4]) { + if let Err(e) = self.discovery_mut().update_enr_nfd(nfd) { + crit!(error = e, "Could not update nfd in ENR"); + } + } + /* Private internal functions */ /// Updates the current meta data of the node to match the local ENR. 
diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 21df75a648..601c59a9c8 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -171,28 +171,29 @@ impl PubsubMessage { // the ssz decoders match gossip_topic.kind() { GossipKind::BeaconAggregateAndProof => { - let signed_aggregate_and_proof = - match fork_context.from_context_bytes(gossip_topic.fork_digest) { - Some(&fork_name) => { - if fork_name.electra_enabled() { - SignedAggregateAndProof::Electra( - SignedAggregateAndProofElectra::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ) - } else { - SignedAggregateAndProof::Base( - SignedAggregateAndProofBase::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ) - } + let signed_aggregate_and_proof = match fork_context + .get_fork_from_context_bytes(gossip_topic.fork_digest) + { + Some(&fork_name) => { + if fork_name.electra_enabled() { + SignedAggregateAndProof::Electra( + SignedAggregateAndProofElectra::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) + } else { + SignedAggregateAndProof::Base( + SignedAggregateAndProofBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) } - None => { - return Err(format!( - "Unknown gossipsub fork digest: {:?}", - gossip_topic.fork_digest - )) - } - }; + } + None => { + return Err(format!( + "Unknown gossipsub fork digest: {:?}", + gossip_topic.fork_digest + )) + } + }; Ok(PubsubMessage::AggregateAndProofAttestation(Box::new( signed_aggregate_and_proof, ))) @@ -206,48 +207,49 @@ impl PubsubMessage { )))) } GossipKind::BeaconBlock => { - let beacon_block = - match fork_context.from_context_bytes(gossip_topic.fork_digest) { - Some(ForkName::Base) => SignedBeaconBlock::::Base( - SignedBeaconBlockBase::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ), - Some(ForkName::Altair) => SignedBeaconBlock::::Altair( - 
SignedBeaconBlockAltair::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ), - Some(ForkName::Bellatrix) => SignedBeaconBlock::::Bellatrix( - SignedBeaconBlockBellatrix::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ), - Some(ForkName::Capella) => SignedBeaconBlock::::Capella( - SignedBeaconBlockCapella::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ), - Some(ForkName::Deneb) => SignedBeaconBlock::::Deneb( - SignedBeaconBlockDeneb::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ), - Some(ForkName::Electra) => SignedBeaconBlock::::Electra( - SignedBeaconBlockElectra::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ), - Some(ForkName::Fulu) => SignedBeaconBlock::::Fulu( - SignedBeaconBlockFulu::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ), - None => { - return Err(format!( - "Unknown gossipsub fork digest: {:?}", - gossip_topic.fork_digest - )) - } - }; + let beacon_block = match fork_context + .get_fork_from_context_bytes(gossip_topic.fork_digest) + { + Some(ForkName::Base) => SignedBeaconBlock::::Base( + SignedBeaconBlockBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + Some(ForkName::Altair) => SignedBeaconBlock::::Altair( + SignedBeaconBlockAltair::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + Some(ForkName::Bellatrix) => SignedBeaconBlock::::Bellatrix( + SignedBeaconBlockBellatrix::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + Some(ForkName::Capella) => SignedBeaconBlock::::Capella( + SignedBeaconBlockCapella::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + Some(ForkName::Deneb) => SignedBeaconBlock::::Deneb( + SignedBeaconBlockDeneb::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + Some(ForkName::Electra) => SignedBeaconBlock::::Electra( + SignedBeaconBlockElectra::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + Some(ForkName::Fulu) => SignedBeaconBlock::::Fulu( + 
SignedBeaconBlockFulu::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + None => { + return Err(format!( + "Unknown gossipsub fork digest: {:?}", + gossip_topic.fork_digest + )) + } + }; Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } GossipKind::BlobSidecar(blob_index) => { if let Some(fork_name) = - fork_context.from_context_bytes(gossip_topic.fork_digest) + fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) { if fork_name.deneb_enabled() { let blob_sidecar = Arc::new( @@ -267,7 +269,7 @@ impl PubsubMessage { )) } GossipKind::DataColumnSidecar(subnet_id) => { - match fork_context.from_context_bytes(gossip_topic.fork_digest) { + match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) { Some(fork) if fork.fulu_enabled() => { let col_sidecar = Arc::new( DataColumnSidecar::from_ssz_bytes(data) @@ -295,28 +297,29 @@ impl PubsubMessage { Ok(PubsubMessage::ProposerSlashing(Box::new(proposer_slashing))) } GossipKind::AttesterSlashing => { - let attester_slashing = - match fork_context.from_context_bytes(gossip_topic.fork_digest) { - Some(&fork_name) => { - if fork_name.electra_enabled() { - AttesterSlashing::Electra( - AttesterSlashingElectra::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ) - } else { - AttesterSlashing::Base( - AttesterSlashingBase::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ) - } + let attester_slashing = match fork_context + .get_fork_from_context_bytes(gossip_topic.fork_digest) + { + Some(&fork_name) => { + if fork_name.electra_enabled() { + AttesterSlashing::Electra( + AttesterSlashingElectra::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) + } else { + AttesterSlashing::Base( + AttesterSlashingBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) } - None => { - return Err(format!( - "Unknown gossipsub fork digest: {:?}", - gossip_topic.fork_digest - )) - } - }; + } + None => { + return Err(format!( + "Unknown gossipsub fork digest: 
{:?}", + gossip_topic.fork_digest + )) + } + }; Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing))) } GossipKind::SignedContributionAndProof => { @@ -343,7 +346,7 @@ impl PubsubMessage { ))) } GossipKind::LightClientFinalityUpdate => { - let light_client_finality_update = match fork_context.from_context_bytes(gossip_topic.fork_digest) { + let light_client_finality_update = match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) { Some(&fork_name) => { LightClientFinalityUpdate::from_ssz_bytes(data, fork_name) .map_err(|e| format!("{:?}", e))? @@ -358,7 +361,7 @@ impl PubsubMessage { ))) } GossipKind::LightClientOptimisticUpdate => { - let light_client_optimistic_update = match fork_context.from_context_bytes(gossip_topic.fork_digest) { + let light_client_optimistic_update = match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) { Some(&fork_name) => { LightClientOptimisticUpdate::from_ssz_bytes(data, fork_name) .map_err(|e| format!("{:?}", e))? diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 0dac126909..61f48a9a6f 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -11,7 +11,7 @@ use tracing::{debug, error, info_span, Instrument}; use tracing_subscriber::EnvFilter; use types::{ ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, - MinimalEthSpec, Slot, + MinimalEthSpec, }; type E = MinimalEthSpec; @@ -19,33 +19,36 @@ type E = MinimalEthSpec; use lighthouse_network::rpc::config::InboundRateLimiterConfig; use tempfile::Builder as TempBuilder; -/// Returns a dummy fork context -pub fn fork_context(fork_name: ForkName) -> ForkContext { +/// Returns a chain spec with all forks enabled. 
+pub fn spec_with_all_forks_enabled() -> ChainSpec { let mut chain_spec = E::default_spec(); - let altair_fork_epoch = Epoch::new(1); - let bellatrix_fork_epoch = Epoch::new(2); - let capella_fork_epoch = Epoch::new(3); - let deneb_fork_epoch = Epoch::new(4); - let electra_fork_epoch = Epoch::new(5); - let fulu_fork_epoch = Epoch::new(6); + chain_spec.altair_fork_epoch = Some(Epoch::new(1)); + chain_spec.bellatrix_fork_epoch = Some(Epoch::new(2)); + chain_spec.capella_fork_epoch = Some(Epoch::new(3)); + chain_spec.deneb_fork_epoch = Some(Epoch::new(4)); + chain_spec.electra_fork_epoch = Some(Epoch::new(5)); + chain_spec.fulu_fork_epoch = Some(Epoch::new(6)); - chain_spec.altair_fork_epoch = Some(altair_fork_epoch); - chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - chain_spec.capella_fork_epoch = Some(capella_fork_epoch); - chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); - chain_spec.electra_fork_epoch = Some(electra_fork_epoch); - chain_spec.fulu_fork_epoch = Some(fulu_fork_epoch); + // check that we have all forks covered + assert!(chain_spec.fork_epoch(ForkName::latest()).is_some()); + chain_spec +} - let current_slot = match fork_name { - ForkName::Base => Slot::new(0), - ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), - ForkName::Bellatrix => bellatrix_fork_epoch.start_slot(E::slots_per_epoch()), - ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), - ForkName::Deneb => deneb_fork_epoch.start_slot(E::slots_per_epoch()), - ForkName::Electra => electra_fork_epoch.start_slot(E::slots_per_epoch()), - ForkName::Fulu => fulu_fork_epoch.start_slot(E::slots_per_epoch()), +/// Returns a dummy fork context +pub fn fork_context(fork_name: ForkName, spec: &ChainSpec) -> ForkContext { + let current_epoch = match fork_name { + ForkName::Base => Some(Epoch::new(0)), + ForkName::Altair => spec.altair_fork_epoch, + ForkName::Bellatrix => spec.bellatrix_fork_epoch, + ForkName::Capella => spec.capella_fork_epoch, 
+ ForkName::Deneb => spec.deneb_fork_epoch, + ForkName::Electra => spec.electra_fork_epoch, + ForkName::Fulu => spec.fulu_fork_epoch, }; - ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) + let current_slot = current_epoch + .unwrap_or_else(|| panic!("expect fork {fork_name} to be scheduled")) + .start_slot(E::slots_per_epoch()); + ForkContext::new::(current_slot, Hash256::zero(), spec) } pub struct Libp2pInstance( @@ -122,7 +125,7 @@ pub async fn build_libp2p_instance( let libp2p_context = lighthouse_network::Context { config, enr_fork_id: EnrForkId::default(), - fork_context: Arc::new(fork_context(fork_name)), + fork_context: Arc::new(fork_context(fork_name, &chain_spec)), chain_spec, libp2p_registry: None, }; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index e50f70e43a..11fe93288f 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -2,6 +2,7 @@ mod common; +use crate::common::spec_with_all_forks_enabled; use common::{build_tracing_subscriber, Protocol}; use lighthouse_network::rpc::{methods::*, RequestType}; use lighthouse_network::service::api_types::AppRequestId; @@ -60,7 +61,7 @@ fn test_tcp_status_rpc() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); rt.block_on(async { // get sender/receiver @@ -168,7 +169,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); rt.block_on(async { // get sender/receiver @@ -318,7 +319,7 @@ fn test_blobs_by_range_chunked_rpc() { rt.block_on(async { // get sender/receiver - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), 
ForkName::Deneb, @@ -330,13 +331,18 @@ fn test_blobs_by_range_chunked_rpc() { .await; // BlobsByRange Request + let deneb_slot = spec + .deneb_fork_epoch + .expect("deneb must be scheduled") + .start_slot(E::slots_per_epoch()); let rpc_request = RequestType::BlobsByRange(BlobsByRangeRequest { - start_slot: 0, + start_slot: deneb_slot.as_u64(), count: slot_count, }); - // BlocksByRange Response - let blob = BlobSidecar::::empty(); + // BlobsByRange Response + let mut blob = BlobSidecar::::empty(); + blob.signed_block_header.message.slot = deneb_slot; let rpc_response = Response::BlobsByRange(Some(Arc::new(blob))); @@ -438,7 +444,7 @@ fn test_tcp_blocks_by_range_over_limit() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); rt.block_on(async { // get sender/receiver @@ -545,7 +551,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); rt.block_on(async { // get sender/receiver @@ -681,7 +687,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); rt.block_on(async { // get sender/receiver @@ -804,14 +810,15 @@ fn test_tcp_blocks_by_root_chunked_rpc() { let messages_to_send = 6; - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Bellatrix; let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), - ForkName::Bellatrix, + current_fork_name, spec.clone(), Protocol::Tcp, false, @@ -831,7 +838,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { Hash256::zero(), Hash256::zero(), ], - 
spec.max_request_blocks_upper_bound(), + spec.max_request_blocks(current_fork_name), ), })); @@ -934,7 +941,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { tokio::select! { _ = sender_future => {} _ = receiver_future => {} - _ = sleep(Duration::from_secs(30)) => { + _ = sleep(Duration::from_secs(300)) => { panic!("Future timed out"); } } @@ -952,14 +959,15 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { let messages_to_send: u64 = 10; let extra_messages_to_send: u64 = 10; - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork = ForkName::Base; let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), - ForkName::Base, + current_fork, spec.clone(), Protocol::Tcp, false, @@ -983,7 +991,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { Hash256::zero(), Hash256::zero(), ], - spec.max_request_blocks_upper_bound(), + spec.max_request_blocks(current_fork), ), })); @@ -1098,7 +1106,7 @@ fn goodbye_test(log_level: &str, enable_logging: bool, protocol: Protocol) { let rt = Arc::new(Runtime::new().unwrap()); - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); // get sender/receiver rt.block_on(async { @@ -1180,7 +1188,7 @@ fn test_delayed_rpc_response() { // Set up the logging. build_tracing_subscriber("debug", true); let rt = Arc::new(Runtime::new().unwrap()); - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); // Allow 1 token to be use used every 3 seconds. const QUOTA_SEC: u64 = 3; @@ -1314,7 +1322,7 @@ fn test_active_requests() { // Set up the logging. build_tracing_subscriber("debug", true); let rt = Arc::new(Runtime::new().unwrap()); - let spec = Arc::new(E::default_spec()); + let spec = Arc::new(spec_with_all_forks_enabled()); rt.block_on(async { // Get sender/receiver. 
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 89f71dc367..f0414b6612 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -37,8 +37,8 @@ use tokio::sync::mpsc; use tokio::time::Sleep; use tracing::{debug, error, info, info_span, trace, warn, Instrument}; use types::{ - ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, - Unsigned, ValidatorSubscription, + EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, Unsigned, + ValidatorSubscription, }; mod tests; @@ -187,11 +187,11 @@ pub struct NetworkService { store: Arc>, /// A collection of global variables, accessible outside of the network service. network_globals: Arc>, - /// A delay that expires when a new fork takes place. - next_fork_update: Pin>>, - /// A delay that expires when we need to subscribe to a new fork's topics. - next_fork_subscriptions: Pin>>, - /// A delay that expires when we need to unsubscribe from old fork topics. + /// A delay that expires when the fork digest changes. + next_digest_update: Pin>>, + /// A delay that expires when we need to subscribe to a new set of topics. + next_topic_subscriptions: Pin>>, + /// A delay that expires when we need to unsubscribe from old topics. next_unsubscribe: Pin>>, /// Shutdown beacon node after sync is complete. 
shutdown_after_sync: bool, @@ -250,8 +250,10 @@ impl NetworkService { let enr_fork_id = beacon_chain.enr_fork_id(); // keep track of when our fork_id needs to be updated - let next_fork_update = Box::pin(next_fork_delay(&beacon_chain).into()); - let next_fork_subscriptions = Box::pin(next_fork_subscriptions_delay(&beacon_chain).into()); + let next_digest_update = Box::pin(next_digest_delay(&beacon_chain).into()); + // topics change when the fork digest changes + let next_topic_subscriptions = + Box::pin(next_topic_subscriptions_delay(&beacon_chain).into()); let next_unsubscribe = Box::pin(None.into()); let current_slot = beacon_chain @@ -265,8 +267,6 @@ impl NetworkService { &beacon_chain.spec, )); - debug!(fork_name = ?fork_context.current_fork(), "Current fork"); - // construct the libp2p service context let service_context = Context { config: config.clone(), @@ -346,8 +346,8 @@ impl NetworkService { router_send, store, network_globals: network_globals.clone(), - next_fork_update, - next_fork_subscriptions, + next_digest_update, + next_topic_subscriptions, next_unsubscribe, shutdown_after_sync: config.shutdown_after_sync, metrics_enabled: config.metrics_enabled, @@ -389,29 +389,16 @@ impl NetworkService { let fork_context = &self.fork_context; let spec = &self.beacon_chain.spec; let current_slot = self.beacon_chain.slot().unwrap_or(spec.genesis_slot); - let current_fork = fork_context.current_fork(); + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let mut result = vec![fork_context - .to_context_bytes(current_fork) - .unwrap_or_else(|| { - panic!( - "{} fork bytes should exist as it's initialized in ForkContext", - current_fork - ) - })]; + let mut result = vec![fork_context.context_bytes(current_epoch)]; - if let Some((next_fork, fork_epoch)) = spec.next_fork_epoch::(current_slot) { + if let Some(next_digest_epoch) = spec.next_digest_epoch(current_epoch) { if current_slot.saturating_add(Slot::new(SUBSCRIBE_DELAY_SLOTS)) - >= 
fork_epoch.start_slot(T::EthSpec::slots_per_epoch()) + >= next_digest_epoch.start_slot(T::EthSpec::slots_per_epoch()) { - let next_fork_context_bytes = - fork_context.to_context_bytes(next_fork).unwrap_or_else(|| { - panic!( - "context bytes should exist as spec.next_fork_epoch({}) returned Some({})", - current_slot, next_fork - ) - }); - result.push(next_fork_context_bytes); + let next_digest = fork_context.context_bytes(next_digest_epoch); + result.push(next_digest); } } @@ -454,7 +441,7 @@ impl NetworkService { event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, - Some(_) = &mut self.next_fork_update => self.update_next_fork(), + Some(_) = &mut self.next_digest_update => self.update_next_fork_digest(), Some(_) = &mut self.next_unsubscribe => { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); @@ -463,13 +450,13 @@ impl NetworkService { self.next_unsubscribe = Box::pin(None.into()); } - Some(_) = &mut self.next_fork_subscriptions => { - if let Some((fork_name, _)) = self.beacon_chain.duration_to_next_fork() { - let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); - let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); + Some(_) = &mut self.next_topic_subscriptions => { + if let Some((epoch, _)) = self.beacon_chain.duration_to_next_digest() { + let fork_name = self.beacon_chain.spec.fork_name_at_epoch(epoch); + let fork_digest = self.beacon_chain.compute_fork_digest(epoch); info!("Subscribing to new fork topics"); self.libp2p.subscribe_new_fork_topics(fork_name, fork_digest); - self.next_fork_subscriptions = Box::pin(None.into()); + self.next_topic_subscriptions = Box::pin(None.into()); } else { error!( "Fork subscription scheduled but no fork scheduled"); @@ -702,7 +689,7 @@ impl NetworkService { let mut subscribed_topics: Vec = vec![]; for topic_kind in core_topics_to_subscribe::( - self.fork_context.current_fork(), + 
self.fork_context.current_fork_name(), &self.network_globals.as_topic_config(), &self.fork_context.spec, ) { @@ -830,31 +817,52 @@ impl NetworkService { } } - fn update_next_fork(&mut self) { + fn update_next_fork_digest(&mut self) { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + // if we are unable to read the slot clock we assume that it is prior to genesis + let current_epoch = self.beacon_chain.epoch().unwrap_or( + self.beacon_chain + .spec + .genesis_slot + .epoch(T::EthSpec::slots_per_epoch()), + ); let new_fork_digest = new_enr_fork_id.fork_digest; let fork_context = &self.fork_context; - if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) { - info!( - old_fork = ?fork_context.current_fork(), - new_fork = ?new_fork_name, - "Transitioned to new fork" - ); - fork_context.update_current_fork(*new_fork_name); + if let Some(new_fork_name) = fork_context.get_fork_from_context_bytes(new_fork_digest) { + if fork_context.current_fork_name() == *new_fork_name { + info!( + epoch = ?current_epoch, + "BPO Fork Triggered" + ) + } else { + info!( + old_fork = ?fork_context.current_fork_name(), + new_fork = ?new_fork_name, + "Transitioned to new fork" + ); + } + + fork_context.update_current_fork(*new_fork_name, new_fork_digest, current_epoch); + if self.beacon_chain.spec.is_peer_das_scheduled() { + let next_fork_digest = fork_context + .next_fork_digest() + .unwrap_or_else(|| fork_context.current_fork_digest()); + self.libp2p.update_nfd(next_fork_digest); + } self.libp2p.update_fork_version(new_enr_fork_id); // Reinitialize the next_fork_update - self.next_fork_update = Box::pin(next_fork_delay(&self.beacon_chain).into()); + self.next_digest_update = Box::pin(next_digest_delay(&self.beacon_chain).into()); // Set the next_unsubscribe delay. 
let epoch_duration = self.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); - // Update the `next_fork_subscriptions` timer if the next fork is known. - self.next_fork_subscriptions = - Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); + // Update the `next_topic_subscriptions` timer if the next change in the fork digest is known. + self.next_topic_subscriptions = + Box::pin(next_topic_subscriptions_delay(&self.beacon_chain).into()); self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); info!( remaining_epochs = UNSUBSCRIBE_DELAY_EPOCHS, @@ -871,7 +879,7 @@ impl NetworkService { fn subscribed_core_topics(&self) -> bool { let core_topics = core_topics_to_subscribe::( - self.fork_context.current_fork(), + self.fork_context.current_fork_name(), &self.network_globals.as_topic_config(), &self.fork_context.spec, ); @@ -884,23 +892,23 @@ impl NetworkService { } } -/// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. +/// Returns a `Sleep` that triggers after the next change in the fork digest. /// If there is no scheduled fork, `None` is returned. -fn next_fork_delay( +fn next_digest_delay( beacon_chain: &BeaconChain, ) -> Option { beacon_chain - .duration_to_next_fork() - .map(|(_, until_fork)| tokio::time::sleep(until_fork)) + .duration_to_next_digest() + .map(|(_, until_epoch)| tokio::time::sleep(until_epoch)) } -/// Returns a `Sleep` that triggers `SUBSCRIBE_DELAY_SLOTS` before the next fork. +/// Returns a `Sleep` that triggers `SUBSCRIBE_DELAY_SLOTS` before the next fork digest changes. /// Returns `None` if there are no scheduled forks or we are already past `current_slot + SUBSCRIBE_DELAY_SLOTS > fork_slot`. 
-fn next_fork_subscriptions_delay( +fn next_topic_subscriptions_delay( beacon_chain: &BeaconChain, ) -> Option { - if let Some((_, duration_to_fork)) = beacon_chain.duration_to_next_fork() { - let duration_to_subscription = duration_to_fork.saturating_sub(Duration::from_secs( + if let Some((_, duration_to_epoch)) = beacon_chain.duration_to_next_digest() { + let duration_to_subscription = duration_to_epoch.saturating_sub(Duration::from_secs( beacon_chain.spec.seconds_per_slot * SUBSCRIBE_DELAY_SLOTS, )); if !duration_to_subscription.is_zero() { diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index db34211747..a8f68384a0 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -11,7 +11,7 @@ use lighthouse_network::{Enr, GossipTopic}; use std::str::FromStr; use std::sync::Arc; use tokio::runtime::Runtime; -use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; +use types::{Epoch, EthSpec, MinimalEthSpec, SubnetId}; impl NetworkService { fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> { @@ -106,8 +106,8 @@ fn test_removing_topic_weight_on_old_topics() { .mock_execution_layer() .build() .chain; - let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); - assert_eq!(next_fork_name, ForkName::Capella); + let (next_fork_epoch, _) = beacon_chain.duration_to_next_digest().expect("next fork"); + assert_eq!(Some(next_fork_epoch), spec.capella_fork_epoch); // Build network service. let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { @@ -189,9 +189,8 @@ fn test_removing_topic_weight_on_old_topics() { beacon_chain.slot_clock.advance_slot(); } - // Run `NetworkService::update_next_fork()`. runtime.block_on(async { - network_service.update_next_fork(); + network_service.update_next_fork_digest(); }); // Check that topic_weight on the old topics has been zeroed. 
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index d11a18ed0a..81b22b99e8 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -264,7 +264,7 @@ pub fn spawn( fork_context: Arc, ) { assert!( - beacon_chain.spec.max_request_blocks(fork_context.current_fork()) as u64 >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, + beacon_chain.spec.max_request_blocks(fork_context.current_fork_name()) as u64 >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request" ); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index d0e62e4ada..2f74bdc733 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -879,7 +879,7 @@ impl SyncNetworkContext { request: RequestType::DataColumnsByRoot( request .clone() - .try_into_request(self.fork_context.current_fork(), &self.chain.spec)?, + .try_into_request(self.fork_context.current_fork_name(), &self.chain.spec)?, ), app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(id)), })?; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 385cd0fcf5..9168a3feee 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -414,7 +414,10 @@ impl> EmptyBlock for BeaconBlockAlta /// Returns an empty Altair block to be used during genesis. 
fn empty(spec: &ChainSpec) -> Self { BeaconBlockAltair { - slot: spec.genesis_slot, + slot: spec + .altair_fork_epoch + .expect("altair enabled") + .start_slot(E::slots_per_epoch()), proposer_index: 0, parent_root: Hash256::zero(), state_root: Hash256::zero(), @@ -447,7 +450,10 @@ impl> BeaconBlockAltair sync_committee_bits: BitVector::default(), }; BeaconBlockAltair { - slot: spec.genesis_slot, + slot: spec + .altair_fork_epoch + .expect("altair enabled") + .start_slot(E::slots_per_epoch()), proposer_index: 0, parent_root: Hash256::zero(), state_root: Hash256::zero(), @@ -475,7 +481,10 @@ impl> EmptyBlock for BeaconBlockBell /// Returns an empty Bellatrix block to be used during genesis. fn empty(spec: &ChainSpec) -> Self { BeaconBlockBellatrix { - slot: spec.genesis_slot, + slot: spec + .bellatrix_fork_epoch + .expect("bellatrix enabled") + .start_slot(E::slots_per_epoch()), proposer_index: 0, parent_root: Hash256::zero(), state_root: Hash256::zero(), @@ -503,7 +512,10 @@ impl> EmptyBlock for BeaconBlockCape /// Returns an empty Capella block to be used during genesis. fn empty(spec: &ChainSpec) -> Self { BeaconBlockCapella { - slot: spec.genesis_slot, + slot: spec + .capella_fork_epoch + .expect("capella enabled") + .start_slot(E::slots_per_epoch()), proposer_index: 0, parent_root: Hash256::zero(), state_root: Hash256::zero(), @@ -532,7 +544,10 @@ impl> EmptyBlock for BeaconBlockDene /// Returns an empty Deneb block to be used during genesis. fn empty(spec: &ChainSpec) -> Self { BeaconBlockDeneb { - slot: spec.genesis_slot, + slot: spec + .deneb_fork_epoch + .expect("deneb enabled") + .start_slot(E::slots_per_epoch()), proposer_index: 0, parent_root: Hash256::zero(), state_root: Hash256::zero(), @@ -562,7 +577,10 @@ impl> EmptyBlock for BeaconBlockElec /// Returns an empty Electra block to be used during genesis. 
fn empty(spec: &ChainSpec) -> Self { BeaconBlockElectra { - slot: spec.genesis_slot, + slot: spec + .electra_fork_epoch + .expect("electra enabled") + .start_slot(E::slots_per_epoch()), proposer_index: 0, parent_root: Hash256::zero(), state_root: Hash256::zero(), @@ -593,7 +611,10 @@ impl> EmptyBlock for BeaconBlockFulu /// Returns an empty Fulu block to be used during genesis. fn empty(spec: &ChainSpec) -> Self { BeaconBlockFulu { - slot: spec.genesis_slot, + slot: spec + .fulu_fork_epoch + .expect("fulu enabled") + .start_slot(E::slots_per_epoch()), proposer_index: 0, parent_root: Hash256::zero(), state_root: Hash256::zero(), diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 631389ce43..4476cd69b3 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -2,6 +2,7 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::blob_sidecar::BlobIdentifier; use crate::data_column_sidecar::DataColumnsByRootIdentifier; use crate::*; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -245,7 +246,7 @@ pub struct ChainSpec { /* * Networking Fulu */ - blob_schedule: BlobSchedule, + pub(crate) blob_schedule: BlobSchedule, min_epochs_for_data_column_sidecars_requests: u64, /* @@ -283,27 +284,15 @@ impl ChainSpec { genesis_validators_root: Hash256, ) -> EnrForkId { EnrForkId { - fork_digest: self.fork_digest::(slot, genesis_validators_root), + fork_digest: self + .compute_fork_digest(genesis_validators_root, slot.epoch(E::slots_per_epoch())), next_fork_version: self.next_fork_version::(slot), next_fork_epoch: self - .next_fork_epoch::(slot) - .map(|(_, e)| e) + .next_digest_epoch(slot.epoch(E::slots_per_epoch())) .unwrap_or(self.far_future_epoch), } } - /// Returns the `ForkDigest` for the given slot. 
- /// - /// If `self.altair_fork_epoch == None`, then this function returns the genesis fork digest - /// otherwise, returns the fork digest based on the slot. - pub fn fork_digest(&self, slot: Slot, genesis_validators_root: Hash256) -> [u8; 4] { - let fork_name = self.fork_name_at_slot::(slot); - Self::compute_fork_digest( - self.fork_version_for_name(fork_name), - genesis_validators_root, - ) - } - /// Returns the `next_fork_version`. /// /// `next_fork_version = current_fork_version` if no future fork is planned, @@ -365,6 +354,11 @@ impl ChainSpec { } } + // This is `compute_fork_version` in the spec + pub fn fork_version_for_epoch(&self, epoch: Epoch) -> [u8; 4] { + self.fork_version_for_name(self.fork_name_at_epoch(epoch)) + } + /// For a given fork name, return the epoch at which it activates. pub fn fork_epoch(&self, fork_name: ForkName) -> Option { match fork_name { @@ -447,8 +441,13 @@ impl ChainSpec { .is_some_and(|fulu_fork_epoch| block_epoch >= fulu_fork_epoch) } - /// Returns true if `FULU_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. + /// Returns true if PeerDAS is scheduled. Alias for [`Self::is_fulu_scheduled`] pub fn is_peer_das_scheduled(&self) -> bool { + self.is_fulu_scheduled() + } + + /// Returns true if `FULU_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. + pub fn is_fulu_scheduled(&self) -> bool { self.fulu_fork_epoch .is_some_and(|fulu_fork_epoch| fulu_fork_epoch != self.far_future_epoch) } @@ -556,18 +555,69 @@ impl ChainSpec { /// /// This is a digest primarily used for domain separation on the p2p layer. /// 4-bytes suffices for practical separation of forks/chains. 
- pub fn compute_fork_digest( - current_version: [u8; 4], - genesis_validators_root: Hash256, - ) -> [u8; 4] { - let mut result = [0; 4]; - let root = Self::compute_fork_data_root(current_version, genesis_validators_root); - result.copy_from_slice( + pub fn compute_fork_digest(&self, genesis_validators_root: Hash256, epoch: Epoch) -> [u8; 4] { + let fork_version = self.fork_version_for_epoch(epoch); + let mut base_digest = [0u8; 4]; + let root = Self::compute_fork_data_root(fork_version, genesis_validators_root); + base_digest.copy_from_slice( root.as_slice() .get(0..4) .expect("root hash is at least 4 bytes"), ); - result + + let Some(blob_parameters) = self.get_blob_parameters(epoch) else { + return base_digest; + }; + + match self.fulu_fork_epoch { + Some(fulu_epoch) if epoch >= fulu_epoch => { + // Concatenate epoch and max_blobs_per_block as u64 bytes + let mut input = Vec::with_capacity(16); + input.extend_from_slice(&blob_parameters.epoch.as_u64().to_le_bytes()); + input.extend_from_slice(&blob_parameters.max_blobs_per_block.to_le_bytes()); + + // Hash the concatenated bytes + let hash = hash(&input); + + // XOR the base digest with the first 4 bytes of the hash + let mut masked_digest = [0u8; 4]; + for (i, (a, b)) in base_digest.iter().zip(hash.iter()).enumerate() { + if let Some(x) = masked_digest.get_mut(i) { + *x = a ^ b; + } + } + masked_digest + } + _ => base_digest, + } + } + + pub fn all_digest_epochs(&self) -> impl std::iter::Iterator { + let mut relevant_epochs = ForkName::list_all_fork_epochs(self) + .into_iter() + .filter_map(|(_, epoch)| epoch) + .collect::>(); + + if self.is_fulu_scheduled() { + for blob_parameters in &self.blob_schedule { + relevant_epochs.insert(blob_parameters.epoch); + } + } + let mut vec = relevant_epochs.into_iter().collect::>(); + vec.sort(); + vec.into_iter() + } + + pub fn next_digest_epoch(&self, epoch: Epoch) -> Option { + match self.fulu_fork_epoch { + Some(fulu_epoch) if epoch >= fulu_epoch => self + 
.all_digest_epochs() + .find(|digest_epoch| *digest_epoch > epoch), + _ => self + .fork_name_at_epoch(epoch) + .next_fork() + .and_then(|fork_name| self.fork_epoch(fork_name)), + } } /// Compute a domain by applying the given `fork_version`. @@ -626,17 +676,6 @@ impl ChainSpec { } } - /// Returns the highest possible value for max_request_blocks based on enabled forks. - /// - /// This is useful for upper bounds in testing. - pub fn max_request_blocks_upper_bound(&self) -> usize { - if self.deneb_fork_epoch.is_some() { - self.max_request_blocks_deneb as usize - } else { - self.max_request_blocks as usize - } - } - pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize { if fork_name.electra_enabled() { self.max_request_blob_sidecars_electra as usize @@ -672,6 +711,24 @@ impl ChainSpec { } } + /// Return the blob parameters at a given epoch. + fn get_blob_parameters(&self, epoch: Epoch) -> Option { + match self.fulu_fork_epoch { + Some(fulu_epoch) if epoch >= fulu_epoch => self + .blob_schedule + .blob_parameters_for_epoch(epoch) + .or_else(|| { + Some(BlobParameters { + epoch: self + .electra_fork_epoch + .expect("electra fork epoch must be set if fulu epoch is set"), + max_blobs_per_block: self.max_blobs_per_block_electra, + }) + }), + _ => None, + } + } + // TODO(EIP-7892): remove this once we have fork-version changes on BPO forks pub fn max_blobs_per_block_within_fork(&self, fork_name: ForkName) -> u64 { if !fork_name.fulu_enabled() { @@ -1404,29 +1461,29 @@ impl Default for ChainSpec { #[derive(arbitrary::Arbitrary, Serialize, Deserialize, Debug, PartialEq, Clone)] #[serde(rename_all = "UPPERCASE")] -pub struct BPOFork { - epoch: Epoch, +pub struct BlobParameters { + pub epoch: Epoch, #[serde(with = "serde_utils::quoted_u64")] - max_blobs_per_block: u64, + pub max_blobs_per_block: u64, } -// A wrapper around a vector of BPOFork to ensure that the vector is reverse +// A wrapper around a vector of BlobParameters to ensure that the vector is 
reverse // sorted by epoch. #[derive(arbitrary::Arbitrary, Serialize, Debug, PartialEq, Clone)] -pub struct BlobSchedule(Vec); +pub struct BlobSchedule(Vec); impl<'de> Deserialize<'de> for BlobSchedule { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - let vec = Vec::::deserialize(deserializer)?; + let vec = Vec::::deserialize(deserializer)?; Ok(BlobSchedule::new(vec)) } } impl BlobSchedule { - pub fn new(mut vec: Vec) -> Self { + pub fn new(mut vec: Vec) -> Self { // reverse sort by epoch vec.sort_by(|a, b| b.epoch.cmp(&a.epoch)); Self(vec) @@ -1443,19 +1500,22 @@ impl BlobSchedule { .map(|entry| entry.max_blobs_per_block) } + pub fn blob_parameters_for_epoch(&self, epoch: Epoch) -> Option { + self.0.iter().find(|entry| epoch >= entry.epoch).cloned() + } + pub const fn default() -> Self { - // TODO(EIP-7892): think about what the default should be Self(vec![]) } - pub fn as_vec(&self) -> &Vec { + pub fn as_vec(&self) -> &Vec { &self.0 } } impl<'a> IntoIterator for &'a BlobSchedule { - type Item = &'a BPOFork; - type IntoIter = std::slice::Iter<'a, BPOFork>; + type Item = &'a BlobParameters; + type IntoIter = std::slice::Iter<'a, BlobParameters>; fn into_iter(self) -> Self::IntoIter { self.0.iter() @@ -1463,8 +1523,8 @@ impl<'a> IntoIterator for &'a BlobSchedule { } impl IntoIterator for BlobSchedule { - type Item = BPOFork; - type IntoIter = std::vec::IntoIter; + type Item = BlobParameters; + type IntoIter = std::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() @@ -1691,7 +1751,6 @@ fn default_bellatrix_fork_version() -> [u8; 4] { } fn default_capella_fork_version() -> [u8; 4] { - // TODO: determine if the bellatrix example should be copied like this [0xff, 0xff, 0xff, 0xff] } @@ -2528,23 +2587,23 @@ mod yaml_tests { assert_eq!( config.blob_schedule.as_vec(), &vec![ - BPOFork { + BlobParameters { epoch: Epoch::new(1584), max_blobs_per_block: 20 }, - BPOFork { + BlobParameters { epoch: Epoch::new(1280), 
max_blobs_per_block: 9 }, - BPOFork { + BlobParameters { epoch: Epoch::new(1024), max_blobs_per_block: 18 }, - BPOFork { + BlobParameters { epoch: Epoch::new(768), max_blobs_per_block: 15 }, - BPOFork { + BlobParameters { epoch: Epoch::new(512), max_blobs_per_block: 12 }, @@ -2563,6 +2622,88 @@ mod yaml_tests { assert_eq!(spec.max_blobs_per_block_within_fork(ForkName::Fulu), 20); } + #[test] + fn blob_schedule_fork_digest() { + let spec_contents = r#" + PRESET_BASE: 'mainnet' + MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 384 + MIN_GENESIS_TIME: 1748264340 + GENESIS_FORK_VERSION: 0x10355025 + GENESIS_DELAY: 60 + SECONDS_PER_SLOT: 12 + SECONDS_PER_ETH1_BLOCK: 12 + MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 + SHARD_COMMITTEE_PERIOD: 256 + ETH1_FOLLOW_DISTANCE: 2048 + INACTIVITY_SCORE_BIAS: 4 + INACTIVITY_SCORE_RECOVERY_RATE: 16 + EJECTION_BALANCE: 16000000000 + MIN_PER_EPOCH_CHURN_LIMIT: 4 + CHURN_LIMIT_QUOTIENT: 65536 + MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + PROPOSER_SCORE_BOOST: 40 + REORG_HEAD_WEIGHT_THRESHOLD: 20 + REORG_PARENT_WEIGHT_THRESHOLD: 160 + REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + DEPOSIT_CHAIN_ID: 7042643276 + DEPOSIT_NETWORK_ID: 7042643276 + DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + + ALTAIR_FORK_VERSION: 0x20355025 + ALTAIR_FORK_EPOCH: 0 + BELLATRIX_FORK_VERSION: 0x30355025 + BELLATRIX_FORK_EPOCH: 0 + CAPELLA_FORK_VERSION: 0x40355025 + CAPELLA_FORK_EPOCH: 0 + DENEB_FORK_VERSION: 0x50355025 + DENEB_FORK_EPOCH: 0 + ELECTRA_FORK_VERSION: 0x60000000 + ELECTRA_FORK_EPOCH: 9 + FULU_FORK_VERSION: 0x06000000 + FULU_FORK_EPOCH: 100 + BLOB_SCHEDULE: + - EPOCH: 9 + MAX_BLOBS_PER_BLOCK: 9 + - EPOCH: 100 + MAX_BLOBS_PER_BLOCK: 100 + - EPOCH: 150 + MAX_BLOBS_PER_BLOCK: 175 + - EPOCH: 200 + MAX_BLOBS_PER_BLOCK: 200 + - EPOCH: 250 + MAX_BLOBS_PER_BLOCK: 275 + - EPOCH: 300 + MAX_BLOBS_PER_BLOCK: 300 + "#; + let config: Config = + serde_yaml::from_str(spec_contents).expect("error while deserializing"); + let spec = + 
ChainSpec::from_config::(&config).expect("error while creating spec"); + + let genesis_validators_root = Hash256::from_slice(&[0; 32]); + + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(100)); + assert_eq!(digest, [0xdf, 0x67, 0x55, 0x7b]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(101)); + assert_eq!(digest, [0xdf, 0x67, 0x55, 0x7b]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(150)); + assert_eq!(digest, [0x8a, 0xb3, 0x8b, 0x59]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(199)); + assert_eq!(digest, [0x8a, 0xb3, 0x8b, 0x59]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(200)); + assert_eq!(digest, [0xd9, 0xb8, 0x14, 0x38]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(201)); + assert_eq!(digest, [0xd9, 0xb8, 0x14, 0x38]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(250)); + assert_eq!(digest, [0x4e, 0xf3, 0x2a, 0x62]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(299)); + assert_eq!(digest, [0x4e, 0xf3, 0x2a, 0x62]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(300)); + assert_eq!(digest, [0xca, 0x10, 0x0d, 0x64]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(301)); + assert_eq!(digest, [0xca, 0x10, 0x0d, 0x64]); + } + #[test] fn apply_to_spec() { let mut spec = ChainSpec::minimal(); diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 3ae7c39cfe..e3742cb96c 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -24,10 +24,14 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct EnrForkId { + /// Fork digest of the current fork computed from [`ChainSpec::compute_fork_digest`]. 
#[serde(with = "serde_utils::bytes_4_hex")] pub fork_digest: [u8; 4], + /// `next_fork_version` is the fork version corresponding to the next planned fork at a future + /// epoch. The fork version will only change for regular forks, not BPO forks. #[serde(with = "serde_utils::bytes_4_hex")] pub next_fork_version: [u8; 4], + /// `next_fork_epoch` is the epoch at which the next fork (whether a regular fork or a BPO fork) is planned pub next_fork_epoch: Epoch, } diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index a6360705ba..aeb14934f4 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -1,14 +1,39 @@ use parking_lot::RwLock; -use crate::{ChainSpec, EthSpec, ForkName, Hash256, Slot}; -use std::collections::HashMap; +use crate::{ChainSpec, Epoch, EthSpec, ForkName, Hash256, Slot}; +use std::collections::BTreeMap; + +/// Represents a hard fork in the consensus protocol. +/// +/// A hard fork can be one of two types: +/// * A named fork (represented by `ForkName`) which introduces protocol changes. +/// * A blob-parameter-only (BPO) fork which only modifies blob parameters. +/// +/// For BPO forks, the `fork_name` remains unchanged from the previous fork, +/// but the `fork_epoch` and `fork_digest` will be different to reflect the +/// new blob parameter changes. +#[derive(Debug, Clone)] +pub struct HardFork { + fork_name: ForkName, + fork_epoch: Epoch, + fork_digest: [u8; 4], +} + +impl HardFork { + pub fn new(fork_name: ForkName, fork_digest: [u8; 4], fork_epoch: Epoch) -> HardFork { + HardFork { + fork_name, + fork_epoch, + fork_digest, + } + } +} /// Provides fork specific info like the current fork name and the fork digests corresponding to every valid fork. 
#[derive(Debug)] pub struct ForkContext { - current_fork: RwLock, - fork_to_digest: HashMap, - digest_to_fork: HashMap<[u8; 4], ForkName>, + current_fork: RwLock, + epoch_to_forks: BTreeMap, pub spec: ChainSpec, } @@ -22,74 +47,233 @@ impl ForkContext { genesis_validators_root: Hash256, spec: &ChainSpec, ) -> Self { - let fork_to_digest: HashMap = ForkName::list_all() - .into_iter() - .filter_map(|fork| { - if spec.fork_epoch(fork).is_some() { - Some(( - fork, - ChainSpec::compute_fork_digest( - spec.fork_version_for_name(fork), - genesis_validators_root, - ), - )) - } else { - None - } + let epoch_to_forks: BTreeMap<_, _> = spec + .all_digest_epochs() + .map(|epoch| { + let fork_name = spec.fork_name_at_epoch(epoch); + let fork_digest = spec.compute_fork_digest(genesis_validators_root, epoch); + (epoch, HardFork::new(fork_name, fork_digest, epoch)) }) .collect(); - let digest_to_fork = fork_to_digest - .clone() - .into_iter() - .map(|(k, v)| (v, k)) - .collect(); + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + let current_fork = epoch_to_forks + .values() + .filter(|&fork| fork.fork_epoch <= current_epoch) + .next_back() + .cloned() + .expect("should match at least genesis epoch"); Self { - current_fork: RwLock::new(spec.fork_name_at_slot::(current_slot)), - fork_to_digest, - digest_to_fork, + current_fork: RwLock::new(current_fork), + epoch_to_forks, spec: spec.clone(), } } /// Returns `true` if the provided `fork_name` exists in the `ForkContext` object. pub fn fork_exists(&self, fork_name: ForkName) -> bool { - self.fork_to_digest.contains_key(&fork_name) + self.spec.fork_epoch(fork_name).is_some() } - /// Returns the `current_fork`. - pub fn current_fork(&self) -> ForkName { - *self.current_fork.read() + /// Returns the current fork name. + pub fn current_fork_name(&self) -> ForkName { + self.current_fork.read().fork_name } - /// Updates the `current_fork` field to a new fork. 
- pub fn update_current_fork(&self, new_fork: ForkName) { - *self.current_fork.write() = new_fork; + /// Returns the current fork epoch. + pub fn current_fork_epoch(&self) -> Epoch { + self.current_fork.read().fork_epoch + } + + /// Returns the current fork digest. + pub fn current_fork_digest(&self) -> [u8; 4] { + self.current_fork.read().fork_digest + } + + /// Returns the next fork digest. If there's no future fork, returns the current fork digest. + pub fn next_fork_digest(&self) -> Option<[u8; 4]> { + let current_fork_epoch = self.current_fork_epoch(); + self.epoch_to_forks + .range(current_fork_epoch..) + .nth(1) + .map(|(_, fork)| fork.fork_digest) + } + + /// Updates the `digest_epoch` field to a new digest epoch. + pub fn update_current_fork( + &self, + new_fork_name: ForkName, + new_fork_digest: [u8; 4], + new_fork_epoch: Epoch, + ) { + debug_assert!(self.epoch_to_forks.contains_key(&new_fork_epoch)); + *self.current_fork.write() = HardFork::new(new_fork_name, new_fork_digest, new_fork_epoch); } /// Returns the context bytes/fork_digest corresponding to the genesis fork version. pub fn genesis_context_bytes(&self) -> [u8; 4] { - *self - .fork_to_digest - .get(&ForkName::Base) - .expect("ForkContext must contain genesis context bytes") + self.epoch_to_forks + .first_key_value() + .expect("must contain genesis epoch") + .1 + .fork_digest } /// Returns the fork type given the context bytes/fork_digest. /// Returns `None` if context bytes doesn't correspond to any valid `ForkName`. - pub fn from_context_bytes(&self, context: [u8; 4]) -> Option<&ForkName> { - self.digest_to_fork.get(&context) + pub fn get_fork_from_context_bytes(&self, context: [u8; 4]) -> Option<&ForkName> { + self.epoch_to_forks + .values() + .find(|fork| fork.fork_digest == context) + .map(|fork| &fork.fork_name) } - /// Returns the context bytes/fork_digest corresponding to a fork name. - /// Returns `None` if the `ForkName` has not been initialized. 
- pub fn to_context_bytes(&self, fork_name: ForkName) -> Option<[u8; 4]> { - self.fork_to_digest.get(&fork_name).cloned() + /// Returns the context bytes/fork_digest corresponding to an epoch. + /// See [`ChainSpec::compute_fork_digest`] + pub fn context_bytes(&self, epoch: Epoch) -> [u8; 4] { + self.epoch_to_forks + .range(..=epoch) + .next_back() + .expect("should match at least genesis epoch") + .1 + .fork_digest } /// Returns all `fork_digest`s that are currently in the `ForkContext` object. pub fn all_fork_digests(&self) -> Vec<[u8; 4]> { - self.digest_to_fork.keys().cloned().collect() + self.epoch_to_forks + .values() + .map(|fork| fork.fork_digest) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::chain_spec::{BlobParameters, BlobSchedule}; + use crate::MainnetEthSpec; + + type E = MainnetEthSpec; + + fn make_chain_spec() -> ChainSpec { + let blob_parameters = vec![ + BlobParameters { + epoch: Epoch::new(6), + max_blobs_per_block: 12, + }, + BlobParameters { + epoch: Epoch::new(50), + max_blobs_per_block: 24, + }, + BlobParameters { + epoch: Epoch::new(100), + max_blobs_per_block: 48, + }, + ]; + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(1)); + spec.bellatrix_fork_epoch = Some(Epoch::new(2)); + spec.capella_fork_epoch = Some(Epoch::new(3)); + spec.deneb_fork_epoch = Some(Epoch::new(4)); + spec.electra_fork_epoch = Some(Epoch::new(5)); + spec.fulu_fork_epoch = Some(Epoch::new(6)); + spec.blob_schedule = BlobSchedule::new(blob_parameters); + spec + } + + #[test] + fn test_fork_exists() { + let spec = make_chain_spec(); + let genesis_root = Hash256::ZERO; + let current_slot = Slot::new(7); + + let context = ForkContext::new::(current_slot, genesis_root, &spec); + + assert!(context.fork_exists(ForkName::Electra)); + assert!(context.fork_exists(ForkName::Fulu)); + } + + #[test] + fn test_current_fork_name_and_epoch() { + let spec = make_chain_spec(); + let electra_epoch = 
spec.electra_fork_epoch.unwrap(); + let electra_slot = electra_epoch.end_slot(E::slots_per_epoch()); + let genesis_root = Hash256::ZERO; + + let context = ForkContext::new::(electra_slot, genesis_root, &spec); + + assert_eq!(context.current_fork_name(), ForkName::Electra); + assert_eq!(context.current_fork_epoch(), electra_epoch); + } + + #[test] + fn test_next_fork_digest() { + let spec = make_chain_spec(); + let electra_epoch = spec.electra_fork_epoch.unwrap(); + let electra_slot = electra_epoch.end_slot(E::slots_per_epoch()); + let genesis_root = Hash256::ZERO; + + let context = ForkContext::new::(electra_slot, genesis_root, &spec); + + let next_digest = context.next_fork_digest().unwrap(); + let expected_digest = spec.compute_fork_digest(genesis_root, spec.fulu_fork_epoch.unwrap()); + assert_eq!(next_digest, expected_digest); + } + + #[test] + fn test_get_fork_from_context_bytes() { + let spec = make_chain_spec(); + let genesis_root = Hash256::ZERO; + let current_slot = Slot::new(0); + + let context = ForkContext::new::(current_slot, genesis_root, &spec); + + let electra_digest = spec.compute_fork_digest(genesis_root, Epoch::new(5)); + assert_eq!( + context.get_fork_from_context_bytes(electra_digest), + Some(&ForkName::Electra) + ); + + let invalid_digest = [9, 9, 9, 9]; + assert!(context + .get_fork_from_context_bytes(invalid_digest) + .is_none()); + } + + #[test] + fn test_context_bytes() { + let spec = make_chain_spec(); + let genesis_root = Hash256::ZERO; + let current_slot = Slot::new(0); + + let context = ForkContext::new::(current_slot, genesis_root, &spec); + + assert_eq!( + context.context_bytes(Epoch::new(0)), + spec.compute_fork_digest(genesis_root, Epoch::new(0)) + ); + + assert_eq!( + context.context_bytes(Epoch::new(12)), + spec.compute_fork_digest(genesis_root, Epoch::new(10)) + ); + } + + #[test] + fn test_all_fork_digests() { + let spec = make_chain_spec(); + let genesis_root = Hash256::ZERO; + let current_slot = Slot::new(20); + + let context 
= ForkContext::new::(current_slot, genesis_root, &spec); + + // Get all enabled fork digests + let fork_digests = context.all_fork_digests(); + let expected_digest_count = spec.all_digest_epochs().count(); + + assert_eq!(fork_digests.len(), expected_digest_count); } } diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index e92db49485..4fc26ccffa 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -36,8 +36,6 @@ impl ForkName { pub fn list_all_fork_epochs(spec: &ChainSpec) -> Vec<(ForkName, Option)> { ForkName::list_all() .into_iter() - // Skip Base - .skip(1) .map(|fork| (fork, spec.fork_epoch(fork))) .collect() } diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 85bed35a19..64dce93aef 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -883,11 +883,25 @@ mod test { } } + fn spec_with_all_forks_enabled() -> ChainSpec { + let mut chain_spec = E::default_spec(); + chain_spec.altair_fork_epoch = Some(Epoch::new(1)); + chain_spec.bellatrix_fork_epoch = Some(Epoch::new(2)); + chain_spec.capella_fork_epoch = Some(Epoch::new(3)); + chain_spec.deneb_fork_epoch = Some(Epoch::new(4)); + chain_spec.electra_fork_epoch = Some(Epoch::new(5)); + chain_spec.fulu_fork_epoch = Some(Epoch::new(6)); + + // check that we have all forks covered + assert!(chain_spec.fork_epoch(ForkName::latest()).is_some()); + chain_spec + } + #[test] fn test_ssz_tagged_signed_beacon_block() { type E = MainnetEthSpec; - let spec = &E::default_spec(); + let spec = &spec_with_all_forks_enabled::(); let sig = Signature::empty(); let blocks = vec![ SignedBeaconBlock::::from_block( diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index e1acac12df..98ef0b96d4 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -32,12 +32,13 @@ pub fn run(matches: &ArgMatches, 
spec: &ChainSpec) -> Result<(), Str let secp256k1_keypair = secp256k1::Keypair::generate(); let enr_key = CombinedKey::from_secp256k1(&secp256k1_keypair); + let genesis_fork_digest = spec.compute_fork_digest(Hash256::zero(), Epoch::new(0)); let enr_fork_id = EnrForkId { - fork_digest: ChainSpec::compute_fork_digest(genesis_fork_version, Hash256::zero()), + fork_digest: genesis_fork_digest, next_fork_version: genesis_fork_version, next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH }; - let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec) + let enr = build_enr::(&enr_key, &config, &enr_fork_id, genesis_fork_digest, spec) .map_err(|e| format!("Unable to create ENR: {:?}", e))?; fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; diff --git a/scripts/tests/checkpoint-sync-config-devnet.yaml b/scripts/tests/checkpoint-sync-config-devnet.yaml index de3020a884..a5093631b4 100644 --- a/scripts/tests/checkpoint-sync-config-devnet.yaml +++ b/scripts/tests/checkpoint-sync-config-devnet.yaml @@ -3,18 +3,18 @@ participants: - cl_type: lighthouse cl_image: lighthouse:local el_type: geth - el_image: ethpandaops/geth:fusaka-devnet-1 + el_image: ethpandaops/geth:fusaka-devnet-2 supernode: true - cl_type: lighthouse cl_image: lighthouse:local el_type: geth - el_image: ethpandaops/geth:fusaka-devnet-1 + el_image: ethpandaops/geth:fusaka-devnet-2 supernode: false checkpoint_sync_enabled: true -checkpoint_sync_url: "https://checkpoint-sync.fusaka-devnet-1.ethpandaops.io" +checkpoint_sync_url: "https://checkpoint-sync.fusaka-devnet-2.ethpandaops.io" global_log_level: debug network_params: - network: fusaka-devnet-1 + network: fusaka-devnet-2 From 90ff64381e894728e26447bb866047b6cc90dd15 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 11 Jul 2025 17:02:30 -0700 Subject: [PATCH 31/44] Sync peer attribution (#7733) Which issue # does this PR address? Closes #7604 Improvements to range sync including: 1. 
Contain column requests only to peers that are part of the SyncingChain 2. Attribute the fault to the correct peer and downscore them if they don't return the data columns for the request 3. Improve sync performance by retrying only the failed columns from other peers instead of failing the entire batch 4. Use the earliest_available_slot to make requests to peers that claim to have the epoch. Note: if no earliest_available_slot info is available, fall back to using the previous logic, i.e. assume the peer has everything backfilled up to the WS checkpoint/da boundary. Tested this on fusaka-devnet-2 with a full node and supernode and the recovery logic seems to work well. Also tested this a little on mainnet. Need to do more testing and possibly add some unit tests. --- .../src/peer_manager/peerdb.rs | 45 ++++ .../src/peer_manager/peerdb/sync_status.rs | 13 ++ .../src/service/api_types.rs | 7 + .../src/sync/block_sidecar_coupling.rs | 219 ++++++++++++++---- .../network/src/sync/network_context.rs | 115 ++++++++- .../network/src/sync/range_sync/batch.rs | 2 +- .../network/src/sync/range_sync/chain.rs | 82 ++++++- beacon_node/network/src/sync/tests/range.rs | 46 ++-- scripts/tests/genesis-sync-config-fulu.yaml | 7 +- 9 files changed, 437 insertions(+), 99 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index b28807c47e..6559b24724 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -248,6 +248,34 @@ impl PeerDB { .map(|(peer_id, _)| peer_id) } + /// Returns all the synced peers from the list of allowed peers that claim to have the block + /// components for the given epoch based on `status.earliest_available_slot`. + /// + /// If `earliest_available_slot` info is not available, then return the peer anyway assuming it has the + /// required data. 
+ pub fn synced_peers_for_epoch<'a>( + &'a self, + epoch: Epoch, + allowed_peers: &'a HashSet, + ) -> impl Iterator { + self.peers + .iter() + .filter(move |(peer_id, info)| { + allowed_peers.contains(peer_id) + && info.is_connected() + && match info.sync_status() { + SyncStatus::Synced { info } => { + info.has_slot(epoch.end_slot(E::slots_per_epoch())) + } + SyncStatus::Advanced { info } => { + info.has_slot(epoch.end_slot(E::slots_per_epoch())) + } + _ => false, + } + }) + .map(|(peer_id, _)| peer_id) + } + /// Gives the `peer_id` of all known connected and advanced peers. pub fn advanced_peers(&self) -> impl Iterator { self.peers @@ -291,6 +319,23 @@ impl PeerDB { .map(|(peer_id, _)| peer_id) } + /// Returns an iterator of all peers that are supposed to be custodying + /// the given subnet id that also belong to `allowed_peers`. + pub fn good_range_sync_custody_subnet_peer<'a>( + &'a self, + subnet: DataColumnSubnetId, + allowed_peers: &'a HashSet, + ) -> impl Iterator { + self.peers + .iter() + .filter(move |(peer_id, info)| { + // The custody_subnets hashset can be populated via enr or metadata + let is_custody_subnet_peer = info.is_assigned_to_custody_subnet(&subnet); + allowed_peers.contains(peer_id) && info.is_connected() && is_custody_subnet_peer + }) + .map(|(peer_id, _)| peer_id) + } + /// Gives the ids of all known disconnected peers. pub fn disconnected_peers(&self) -> impl Iterator { self.peers diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs index 5a4fc33994..91e2156a27 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs @@ -28,6 +28,19 @@ pub struct SyncInfo { pub earliest_available_slot: Option, } +impl SyncInfo { + /// Returns true if the provided slot is greater than or equal to the peer's `earliest_available_slot`. 
+ /// + /// If `earliest_available_slot` is None, then we just assume that the peer has the slot. + pub fn has_slot(&self, slot: Slot) -> bool { + if let Some(earliest_available_slot) = self.earliest_available_slot { + slot >= earliest_available_slot + } else { + true + } + } +} + impl std::cmp::PartialEq for SyncStatus { fn eq(&self, other: &Self) -> bool { matches!( diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index b36f8cc215..3013596f9f 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,4 +1,5 @@ use crate::rpc::methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}; +use libp2p::PeerId; use std::fmt::{Display, Formatter}; use std::sync::Arc; use types::{ @@ -61,6 +62,11 @@ pub struct DataColumnsByRangeRequestId { pub id: Id, /// The Id of the overall By Range request for block components. pub parent_request_id: ComponentsByRangeRequestId, + /// The peer id associated with the request. + /// + /// This is useful to penalize the peer at a later point if it returned data columns that + /// did not match with the verified block. + pub peer: PeerId, } /// Block components by range request for range sync. 
Includes an ID for downstream consumers to @@ -306,6 +312,7 @@ mod tests { batch_id: Epoch::new(0), }, }, + peer: PeerId::random(), }; assert_eq!(format!("{id}"), "123/122/RangeSync/0/54"); } diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 0418ab4553..4653daa44a 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,15 +1,17 @@ use beacon_chain::{ block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root, }; -use lighthouse_network::service::api_types::{ - BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, +use lighthouse_network::{ + service::api_types::{ + BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, + }, + PeerAction, PeerId, }; use std::{collections::HashMap, sync::Arc}; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, }; - pub struct RangeBlockComponentsRequest { /// Blocks we have received awaiting for their corresponding sidecar. 
blocks_request: ByRangeRequest>>>, @@ -30,24 +32,38 @@ enum RangeBlockDataRequest { DataColumnsByRangeRequestId, ByRangeRequest>, >, + /// The column indices corresponding to the request + column_peers: HashMap>, expected_custody_columns: Vec, }, } +#[derive(Debug)] +pub struct CouplingError { + pub(crate) msg: String, + pub(crate) column_and_peer: Option<(Vec<(ColumnIndex, PeerId)>, PeerAction)>, +} + impl RangeBlockComponentsRequest { + #[allow(clippy::type_complexity)] pub fn new( blocks_req_id: BlocksByRangeRequestId, blobs_req_id: Option, - data_columns: Option<(Vec, Vec)>, + data_columns: Option<( + Vec<(DataColumnsByRangeRequestId, Vec)>, + Vec, + )>, ) -> Self { let block_data_request = if let Some(blobs_req_id) = blobs_req_id { RangeBlockDataRequest::Blobs(ByRangeRequest::Active(blobs_req_id)) } else if let Some((requests, expected_custody_columns)) = data_columns { + let column_peers: HashMap<_, _> = requests.into_iter().collect(); RangeBlockDataRequest::DataColumns { - requests: requests - .into_iter() - .map(|id| (id, ByRangeRequest::Active(id))) + requests: column_peers + .keys() + .map(|id| (*id, ByRangeRequest::Active(*id))) .collect(), + column_peers, expected_custody_columns, } } else { @@ -60,6 +76,28 @@ impl RangeBlockComponentsRequest { } } + /// Modifies `self` by inserting a new `DataColumnsByRangeRequestId` for a formerly failed + /// request for some columns. 
+ pub fn reinsert_failed_column_requests( + &mut self, + failed_column_requests: Vec<(DataColumnsByRangeRequestId, Vec)>, + ) -> Result<(), String> { + match &mut self.block_data_request { + RangeBlockDataRequest::DataColumns { + requests, + expected_custody_columns: _, + column_peers, + } => { + for (request, columns) in failed_column_requests.into_iter() { + requests.insert(request, ByRangeRequest::Active(request)); + column_peers.insert(request, columns); + } + Ok(()) + } + _ => Err("not a column request".to_string()), + } + } + pub fn add_blocks( &mut self, req_id: BlocksByRangeRequestId, @@ -105,12 +143,15 @@ impl RangeBlockComponentsRequest { } } - pub fn responses(&self, spec: &ChainSpec) -> Option>, String>> { + pub fn responses( + &mut self, + spec: &ChainSpec, + ) -> Option>, CouplingError>> { let Some(blocks) = self.blocks_request.to_finished() else { return None; }; - match &self.block_data_request { + match &mut self.block_data_request { RangeBlockDataRequest::NoData => { Some(Self::responses_with_blobs(blocks.to_vec(), vec![], spec)) } @@ -127,8 +168,10 @@ impl RangeBlockComponentsRequest { RangeBlockDataRequest::DataColumns { requests, expected_custody_columns, + column_peers, } => { let mut data_columns = vec![]; + let mut column_to_peer_id: HashMap = HashMap::new(); for req in requests.values() { let Some(data) = req.to_finished() else { return None; @@ -136,12 +179,33 @@ impl RangeBlockComponentsRequest { data_columns.extend(data.clone()) } - Some(Self::responses_with_custody_columns( + // Note: this assumes that only 1 peer is responsible for a column + // with a batch. 
+ for (id, columns) in column_peers { + for column in columns { + column_to_peer_id.insert(*column, id.peer); + } + } + + let resp = Self::responses_with_custody_columns( blocks.to_vec(), data_columns, + column_to_peer_id, expected_custody_columns, spec, - )) + ); + + if let Err(err) = &resp { + if let Some((peers, _)) = &err.column_and_peer { + for (_, peer) in peers.iter() { + // find the req id associated with the peer and + // delete it from the entries + requests.retain(|&k, _| k.peer != *peer); + } + } + } + + Some(resp) } } } @@ -150,7 +214,7 @@ impl RangeBlockComponentsRequest { blocks: Vec>>, blobs: Vec>>, spec: &ChainSpec, - ) -> Result>, String> { + ) -> Result>, CouplingError> { // There can't be more more blobs than blocks. i.e. sending any blob (empty // included) for a skipped slot is not permitted. let mut responses = Vec::with_capacity(blocks.len()); @@ -165,17 +229,26 @@ impl RangeBlockComponentsRequest { .unwrap_or(false); pair_next_blob } { - blob_list.push(blob_iter.next().ok_or("Missing next blob".to_string())?); + blob_list.push(blob_iter.next().ok_or_else(|| CouplingError { + msg: "Missing next blob".to_string(), + column_and_peer: None, + })?); } let mut blobs_buffer = vec![None; max_blobs_per_block]; for blob in blob_list { let blob_index = blob.index as usize; let Some(blob_opt) = blobs_buffer.get_mut(blob_index) else { - return Err("Invalid blob index".to_string()); + return Err(CouplingError { + msg: "Invalid blob index".to_string(), + column_and_peer: None, + }); }; if blob_opt.is_some() { - return Err("Repeat blob index".to_string()); + return Err(CouplingError { + msg: "Repeat blob index".to_string(), + column_and_peer: None, + }); } else { *blob_opt = Some(blob); } @@ -184,13 +257,24 @@ impl RangeBlockComponentsRequest { blobs_buffer.into_iter().flatten().collect::>(), max_blobs_per_block, ) - .map_err(|_| "Blobs returned exceeds max length".to_string())?; - responses.push(RpcBlock::new(None, block, Some(blobs)).map_err(|e| 
format!("{e:?}"))?) + .map_err(|_| CouplingError { + msg: "Blobs returned exceeds max length".to_string(), + column_and_peer: None, + })?; + responses.push( + RpcBlock::new(None, block, Some(blobs)).map_err(|e| CouplingError { + msg: format!("{e:?}"), + column_and_peer: None, + })?, + ) } // if accumulated sidecars is not empty, throw an error. if blob_iter.next().is_some() { - return Err("Received sidecars that don't pair well".to_string()); + return Err(CouplingError { + msg: "Received sidecars that don't pair well".to_string(), + column_and_peer: None, + }); } Ok(responses) @@ -199,9 +283,10 @@ impl RangeBlockComponentsRequest { fn responses_with_custody_columns( blocks: Vec>>, data_columns: DataColumnSidecarList, + column_to_peer: HashMap, expects_custody_columns: &[ColumnIndex], spec: &ChainSpec, - ) -> Result>, String> { + ) -> Result>, CouplingError> { // Group data columns by block_root and index let mut data_columns_by_block = HashMap::>>>::new(); @@ -215,9 +300,10 @@ impl RangeBlockComponentsRequest { .insert(index, column) .is_some() { - return Err(format!( - "Repeated column block_root {block_root:?} index {index}" - )); + return Err(CouplingError { + msg: format!("Repeated column block_root {block_root:?} index {index}"), + column_and_peer: None, + }); } } @@ -235,30 +321,61 @@ impl RangeBlockComponentsRequest { // TODO(das): on the initial version of PeerDAS the beacon chain does not check // rpc custody requirements and dropping this check can allow the block to have // an inconsistent DB. - return Err(format!("No columns for block {block_root:?} with data")); + + // For now, we always assume that the block peer is right. + // This is potentially dangerous as we can get isolated on a chain with a + // malicious block peer. + // TODO: fix this by checking the proposer signature before downloading columns. 
+ let responsible_peers = column_to_peer.iter().map(|c| (*c.0, *c.1)).collect(); + return Err(CouplingError { + msg: format!("No columns for block {block_root:?} with data"), + column_and_peer: Some((responsible_peers, PeerAction::LowToleranceError)), + }); }; let mut custody_columns = vec![]; + let mut naughty_peers = vec![]; for index in expects_custody_columns { - let Some(data_column) = data_columns_by_index.remove(index) else { - return Err(format!("No column for block {block_root:?} index {index}")); - }; - // Safe to convert to `CustodyDataColumn`: we have asserted that the index of - // this column is in the set of `expects_custody_columns` and with the expected - // block root, so for the expected epoch of this batch. - custody_columns.push(CustodyDataColumn::from_asserted_custody(data_column)); + if let Some(data_column) = data_columns_by_index.remove(index) { + // Safe to convert to `CustodyDataColumn`: we have asserted that the index of + // this column is in the set of `expects_custody_columns` and with the expected + // block root, so for the expected epoch of this batch. 
+ custody_columns.push(CustodyDataColumn::from_asserted_custody(data_column)); + } else { + // Penalize the peer for claiming to have the columns but not returning + // them + let Some(responsible_peer) = column_to_peer.get(index) else { + return Err(CouplingError { + msg: format!("Internal error, no request made for column {}", index), + column_and_peer: None, + }); + }; + naughty_peers.push((*index, *responsible_peer)); + } + } + if !naughty_peers.is_empty() { + return Err(CouplingError { + msg: format!("Peers did not return column for block_root {block_root:?} {naughty_peers:?}"), + column_and_peer: Some((naughty_peers, PeerAction::LowToleranceError)), + }); } // Assert that there are no columns left if !data_columns_by_index.is_empty() { let remaining_indices = data_columns_by_index.keys().collect::>(); - return Err(format!( - "Not all columns consumed for block {block_root:?}: {remaining_indices:?}" - )); + // log the error but don't return an error, we can still progress with extra columns. + tracing::error!( + ?block_root, + ?remaining_indices, + "Not all columns consumed for block" + ); } RpcBlock::new_with_custody_columns(Some(block_root), block, custody_columns, spec) - .map_err(|e| format!("{e:?}"))? + .map_err(|e| CouplingError { + msg: format!("{:?}", e), + column_and_peer: None, + })? } else { // Block has no data, expects zero columns RpcBlock::new_without_blobs(Some(block_root), block) @@ -268,7 +385,9 @@ impl RangeBlockComponentsRequest { // Assert that there are no columns left for other blocks if !data_columns_by_block.is_empty() { let remaining_roots = data_columns_by_block.keys().collect::>(); - return Err(format!("Not all columns consumed: {remaining_roots:?}")); + // log the error but don't return an error, we can still progress with responses. + // this is most likely an internal error with overrequesting or a client bug. 
+ tracing::error!(?remaining_roots, "Not all columns consumed for block"); } Ok(rpc_blocks) @@ -303,9 +422,12 @@ mod tests { use beacon_chain::test_utils::{ generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, NumBlobs, }; - use lighthouse_network::service::api_types::{ - BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, - DataColumnsByRangeRequestId, Id, RangeRequestId, + use lighthouse_network::{ + service::api_types::{ + BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, + DataColumnsByRangeRequestId, Id, RangeRequestId, + }, + PeerId, }; use rand::SeedableRng; use std::sync::Arc; @@ -342,10 +464,11 @@ mod tests { DataColumnsByRangeRequestId { id, parent_request_id, + peer: PeerId::random(), } } - fn is_finished(info: &RangeBlockComponentsRequest) -> bool { + fn is_finished(info: &mut RangeBlockComponentsRequest) -> bool { let spec = test_spec::(); info.responses(&spec).is_some() } @@ -428,7 +551,7 @@ mod tests { let columns_req_id = expects_custody_columns .iter() .enumerate() - .map(|(i, _)| columns_id(i as Id, components_id)) + .map(|(i, column)| (columns_id(i as Id, components_id), vec![*column])) .collect::>(); let mut info = RangeBlockComponentsRequest::::new( blocks_req_id, @@ -442,12 +565,13 @@ mod tests { ) .unwrap(); // Assert response is not finished - assert!(!is_finished(&info)); + assert!(!is_finished(&mut info)); // Send data columns for (i, &column_index) in expects_custody_columns.iter().enumerate() { + let (req, _columns) = columns_req_id.get(i).unwrap(); info.add_custody_columns( - columns_req_id.get(i).copied().unwrap(), + *req, blocks .iter() .flat_map(|b| b.1.iter().filter(|d| d.index == column_index).cloned()) @@ -457,7 +581,7 @@ mod tests { if i < expects_custody_columns.len() - 1 { assert!( - !is_finished(&info), + !is_finished(&mut info), "requested should not be finished at loop {i}" ); } @@ -485,7 +609,7 @@ mod tests { let columns_req_id = 
batched_column_requests .iter() .enumerate() - .map(|(i, _)| columns_id(i as Id, components_id)) + .map(|(i, columns)| (columns_id(i as Id, components_id), columns.clone())) .collect::>(); let mut info = RangeBlockComponentsRequest::::new( @@ -513,12 +637,13 @@ mod tests { ) .unwrap(); // Assert response is not finished - assert!(!is_finished(&info)); + assert!(!is_finished(&mut info)); for (i, column_indices) in batched_column_requests.iter().enumerate() { + let (req, _columns) = columns_req_id.get(i).unwrap(); // Send the set of columns in the same batch request info.add_custody_columns( - columns_req_id.get(i).copied().unwrap(), + *req, blocks .iter() .flat_map(|b| { @@ -532,7 +657,7 @@ mod tests { if i < num_of_data_column_requests - 1 { assert!( - !is_finished(&info), + !is_finished(&mut info), "requested should not be finished at loop {i}" ); } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 2f74bdc733..a62b8f7382 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -14,6 +14,7 @@ use crate::network_beacon_processor::TestBeaconChainType; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::block_lookups::SingleLookupId; +use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; @@ -81,7 +82,7 @@ pub enum RpcResponseError { RpcError(#[allow(dead_code)] RPCError), VerifyError(LookupVerifyError), CustodyRequestError(#[allow(dead_code)] CustodyRequestError), - BlockComponentCouplingError(#[allow(dead_code)] String), + BlockComponentCouplingError(CouplingError), } #[derive(Debug, PartialEq, Eq)] @@ -441,6 +442,79 @@ impl SyncNetworkContext { active_request_count_by_peer } + /// Retries only 
the specified failed columns by requesting them again. + /// + /// Note: This function doesn't retry the whole batch, but retries specific requests within + /// the batch. + pub fn retry_columns_by_range( + &mut self, + request_id: Id, + peers: &HashSet, + peers_to_deprioritize: &HashSet, + request: BlocksByRangeRequest, + failed_columns: &HashSet, + ) -> Result<(), String> { + let Some(requester) = self.components_by_range_requests.keys().find_map(|r| { + if r.id == request_id { + Some(r.requester) + } else { + None + } + }) else { + return Err("request id not present".to_string()); + }; + + let active_request_count_by_peer = self.active_request_count_by_peer(); + + debug!( + ?failed_columns, + "Retrying only failed column requests from other peers" + ); + + // Attempt to find all required custody peers to request the failed columns from + let columns_by_range_peers_to_request = self + .select_columns_by_range_peers_to_request( + failed_columns, + peers, + active_request_count_by_peer, + peers_to_deprioritize, + ) + .map_err(|e| format!("{:?}", e))?; + + // Reuse the id for the request that received partially correct responses + let id = ComponentsByRangeRequestId { + id: request_id, + requester, + }; + + let data_column_requests = columns_by_range_peers_to_request + .into_iter() + .map(|(peer_id, columns)| { + self.send_data_columns_by_range_request( + peer_id, + DataColumnsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + columns, + }, + id, + ) + }) + .collect::, _>>() + .map_err(|e| format!("{:?}", e))?; + + // instead of creating a new `RangeBlockComponentsRequest`, we reinsert + // the new requests created for the failed requests + let Some(range_request) = self.components_by_range_requests.get_mut(&id) else { + return Err( + "retrying custody request for range request that does not exist".to_string(), + ); + }; + + range_request.reinsert_failed_column_requests(data_column_requests)?; + Ok(()) + } + /// A blocks by range 
request sent by the range sync algorithm pub fn block_components_by_range_request( &mut self, @@ -619,20 +693,31 @@ impl SyncNetworkContext { let request = entry.get_mut(); match range_block_component { RangeBlockComponent::Block(req_id, resp) => resp.and_then(|(blocks, _)| { - request - .add_blocks(req_id, blocks) - .map_err(RpcResponseError::BlockComponentCouplingError) + request.add_blocks(req_id, blocks).map_err(|e| { + RpcResponseError::BlockComponentCouplingError(CouplingError { + msg: e, + column_and_peer: None, + }) + }) }), RangeBlockComponent::Blob(req_id, resp) => resp.and_then(|(blobs, _)| { - request - .add_blobs(req_id, blobs) - .map_err(RpcResponseError::BlockComponentCouplingError) + request.add_blobs(req_id, blobs).map_err(|e| { + RpcResponseError::BlockComponentCouplingError(CouplingError { + msg: e, + column_and_peer: None, + }) + }) }), RangeBlockComponent::CustodyColumns(req_id, resp) => { resp.and_then(|(custody_columns, _)| { request .add_custody_columns(req_id, custody_columns) - .map_err(RpcResponseError::BlockComponentCouplingError) + .map_err(|e| { + RpcResponseError::BlockComponentCouplingError(CouplingError { + msg: e, + column_and_peer: None, + }) + }) }) } } @@ -641,8 +726,12 @@ impl SyncNetworkContext { return Some(Err(e)); } - if let Some(blocks_result) = entry.get().responses(&self.chain.spec) { - entry.remove(); + if let Some(blocks_result) = entry.get_mut().responses(&self.chain.spec) { + if blocks_result.is_ok() { + // remove the entry only if it coupled successfully with + // no errors + entry.remove(); + } // If the request is finished, dequeue everything Some(blocks_result.map_err(RpcResponseError::BlockComponentCouplingError)) } else { @@ -1075,10 +1164,12 @@ impl SyncNetworkContext { peer_id: PeerId, request: DataColumnsByRangeRequest, parent_request_id: ComponentsByRangeRequestId, - ) -> Result { + ) -> Result<(DataColumnsByRangeRequestId, Vec), RpcRequestSendError> { + let requested_columns = request.columns.clone(); let 
id = DataColumnsByRangeRequestId { id: self.next_id(), parent_request_id, + peer: peer_id, }; self.send_network_msg(NetworkMessage::SendRequest { @@ -1106,7 +1197,7 @@ impl SyncNetworkContext { false, DataColumnsByRangeRequestItems::new(request), ); - Ok(id) + Ok((id, requested_columns)) } pub fn is_execution_engine_online(&self) -> bool { diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 264f83ee82..e31930075a 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -89,6 +89,7 @@ pub enum BatchOperationOutcome { Failed { blacklist: bool }, } +#[derive(Debug)] pub enum BatchProcessingResult { Success, FaultyFailure, @@ -364,7 +365,6 @@ impl BatchInfo { } } - #[must_use = "Batch may have failed"] pub fn processing_completed( &mut self, procesing_result: BatchProcessingResult, diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index e3794bd2be..0e9178f0f8 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -2,6 +2,7 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use super::RangeSyncType; use crate::metrics; use crate::network_beacon_processor::ChainSegmentProcessId; +use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::{RangeRequestId, RpcRequestSendError, RpcResponseError}; use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult}; use beacon_chain::block_verification_types::RpcBlock; @@ -12,7 +13,7 @@ use logging::crit; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use strum::IntoStaticStr; use tracing::{debug, instrument, warn}; -use types::{Epoch, EthSpec, Hash256, Slot}; +use types::{ColumnIndex, Epoch, EthSpec, Hash256, Slot}; /// Blocks are downloaded in batches from peers. 
This constant specifies how many epochs worth of /// blocks per batch are requested _at most_. A batch may request less blocks to account for @@ -826,11 +827,37 @@ impl SyncingChain { ) -> ProcessingResult { let batch_state = self.visualize_batch_state(); if let Some(batch) = self.batches.get_mut(&batch_id) { + if let RpcResponseError::BlockComponentCouplingError(CouplingError { + column_and_peer, + msg, + }) = &err + { + debug!(?batch_id, msg, "Block components coupling error"); + // Note: we don't fail the batch here because a `CouplingError` is + // recoverable by requesting from other honest peers. + if let Some((column_and_peer, action)) = column_and_peer { + let mut failed_columns = HashSet::new(); + let mut failed_peers = HashSet::new(); + for (column, peer) in column_and_peer { + failed_columns.insert(*column); + failed_peers.insert(*peer); + } + for peer in failed_peers.iter() { + network.report_peer(*peer, *action, "failed to return columns"); + } + + return self.retry_partial_batch( + network, + batch_id, + request_id, + failed_columns, + failed_peers, + ); + } + } // A batch could be retried without the peer failing the request (disconnecting/ // sending an error /timeout) if the peer is removed from the chain for other // reasons. Check that this block belongs to the expected peer - // TODO(das): removed peer_id matching as the node may request a different peer for data - // columns. if !batch.is_expecting_block(&request_id) { debug!( batch_epoch = %batch_id, @@ -891,7 +918,7 @@ impl SyncingChain { .network_globals() .peers .read() - .synced_peers() + .synced_peers_for_epoch(batch_id, &self.peers) .cloned() .collect::>(); @@ -951,6 +978,50 @@ impl SyncingChain { Ok(KeepChain) } + /// Retries partial column requests within the batch by creating new requests for the failed columns. 
+ #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] + pub fn retry_partial_batch( + &mut self, + network: &mut SyncNetworkContext, + batch_id: BatchId, + id: Id, + failed_columns: HashSet, + mut failed_peers: HashSet, + ) -> ProcessingResult { + if let Some(batch) = self.batches.get_mut(&batch_id) { + failed_peers.extend(&batch.failed_peers()); + let req = batch.to_blocks_by_range_request().0; + + let synced_peers = network + .network_globals() + .peers + .read() + .synced_peers() + .cloned() + .collect::>(); + + match network.retry_columns_by_range( + id, + &synced_peers, + &failed_peers, + req, + &failed_columns, + ) { + Ok(_) => { + debug!( + ?batch_id, + id, "Retried column requests from different peers" + ); + return Ok(KeepChain); + } + Err(e) => { + debug!(?batch_id, id, e, "Failed to retry partial batch"); + } + } + } + Ok(KeepChain) + } + /// Returns true if this chain is currently syncing. pub fn is_syncing(&self) -> bool { match self.state { @@ -1031,9 +1102,8 @@ impl SyncingChain { .network_globals() .peers .read() - .good_custody_subnet_peer(*subnet_id) + .good_range_sync_custody_subnet_peer(*subnet_id, &self.peers) .count(); - peer_count > 0 }); peers_on_all_custody_subnets diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index fa1e057765..7c184d3b39 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -77,7 +77,7 @@ impl TestRig { /// Produce a head peer with an advanced head fn add_head_peer_with_root(&mut self, head_root: Hash256) -> PeerId { let local_info = self.local_info(); - self.add_random_peer(SyncInfo { + self.add_supernode_peer(SyncInfo { head_root, head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64), ..local_info @@ -93,7 +93,7 @@ impl TestRig { fn add_finalized_peer_with_root(&mut self, finalized_root: Hash256) -> PeerId { let local_info = self.local_info(); let 
finalized_epoch = local_info.finalized_epoch + 2; - self.add_random_peer(SyncInfo { + self.add_supernode_peer(SyncInfo { finalized_epoch, finalized_root, head_slot: finalized_epoch.start_slot(E::slots_per_epoch()), @@ -132,13 +132,13 @@ impl TestRig { } } - fn add_random_peer_not_supernode(&mut self, remote_info: SyncInfo) -> PeerId { + fn add_fullnode_peer(&mut self, remote_info: SyncInfo) -> PeerId { let peer_id = self.new_connected_peer(); self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info)); peer_id } - fn add_random_peer(&mut self, remote_info: SyncInfo) -> PeerId { + fn add_supernode_peer(&mut self, remote_info: SyncInfo) -> PeerId { // Create valid peer known to network globals // TODO(fulu): Using supernode peers to ensure we have peer across all column // subnets for syncing. Should add tests connecting to full node peers. @@ -148,17 +148,13 @@ impl TestRig { peer_id } - fn add_random_peers(&mut self, remote_info: SyncInfo, count: usize) { - for _ in 0..count { + fn add_fullnode_peers(&mut self, remote_info: SyncInfo, peer_count: usize) { + for _ in 0..peer_count { let peer = self.new_connected_peer(); - self.add_peer(peer, remote_info.clone()); + self.send_sync_message(SyncMessage::AddPeer(peer, remote_info.clone())); } } - fn add_peer(&mut self, peer: PeerId, remote_info: SyncInfo) { - self.send_sync_message(SyncMessage::AddPeer(peer, remote_info)); - } - fn assert_state(&self, state: RangeSyncType) { assert_eq!( self.sync_manager @@ -562,19 +558,14 @@ const EXTRA_SYNCED_EPOCHS: u64 = 2 + 1; fn finalized_sync_enough_global_custody_peers_few_chain_peers() { // Run for all forks let mut r = TestRig::test_setup(); - // This test creates enough global custody peers to satisfy column queries but only adds few - // peers to the chain - r.new_connected_peers_for_peerdas(); let advanced_epochs: u64 = 2; let remote_info = r.finalized_remote_info_advanced_by(advanced_epochs.into()); - // Current priorization only sends batches to idle peers, so we 
need enough peers for each batch - // TODO: Test this with a single peer in the chain, it should still work - r.add_random_peers( - remote_info, - (advanced_epochs + EXTRA_SYNCED_EPOCHS) as usize, - ); + // Generate enough peers and supernodes to cover all custody columns + let peer_count = 100; + r.add_fullnode_peers(remote_info.clone(), peer_count); + r.add_supernode_peer(remote_info); r.assert_state(RangeSyncType::Finalized); let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS; @@ -592,9 +583,9 @@ fn finalized_sync_not_enough_custody_peers_on_start() { let advanced_epochs: u64 = 2; let remote_info = r.finalized_remote_info_advanced_by(advanced_epochs.into()); - // Unikely that the single peer we added has enough columns for us. Tests are determinstic and + // Unlikely that the single peer we added has enough columns for us. Tests are deterministic and // this error should never be hit - r.add_random_peer_not_supernode(remote_info.clone()); + r.add_fullnode_peer(remote_info.clone()); r.assert_state(RangeSyncType::Finalized); // Because we don't have enough peers on all columns we haven't sent any request. @@ -603,14 +594,9 @@ r.expect_empty_network(); // Generate enough peers and supernodes to cover all custody columns - r.new_connected_peers_for_peerdas(); - // Note: not necessary to add this peers to the chain, as we draw from the global pool - // We still need to add enough peers to trigger batch downloads with idle peers. Same issue as - // the test above.
- r.add_random_peers( - remote_info, - (advanced_epochs + EXTRA_SYNCED_EPOCHS - 1) as usize, - ); + let peer_count = 100; + r.add_fullnode_peers(remote_info.clone(), peer_count); + r.add_supernode_peer(remote_info); let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS; r.complete_and_process_range_sync_until(last_epoch, filter()); diff --git a/scripts/tests/genesis-sync-config-fulu.yaml b/scripts/tests/genesis-sync-config-fulu.yaml index 91aa4d1ffd..b25ac0a704 100644 --- a/scripts/tests/genesis-sync-config-fulu.yaml +++ b/scripts/tests/genesis-sync-config-fulu.yaml @@ -3,19 +3,20 @@ participants: - cl_type: lighthouse cl_image: lighthouse:local el_type: geth - el_image: ethpandaops/geth:fusaka-devnet-1 + el_image: ethpandaops/geth:fusaka-devnet-2 + supernode: true count: 2 # nodes without validators, used for testing sync. - cl_type: lighthouse cl_image: lighthouse:local el_type: geth - el_image: ethpandaops/geth:fusaka-devnet-1 + el_image: ethpandaops/geth:fusaka-devnet-2 supernode: true validator_count: 0 - cl_type: lighthouse cl_image: lighthouse:local el_type: geth - el_image: ethpandaops/geth:fusaka-devnet-1 + el_image: ethpandaops/geth:fusaka-devnet-2 supernode: false validator_count: 0 network_params: From 6409a32274ea51683c6b2edbcbeea22f73807acf Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Mon, 14 Jul 2025 11:24:49 +0800 Subject: [PATCH 32/44] Add a guide to partially reconstruct historic states to Lighthouse book (#7679) The main change is adding a guide to partially reconstruct historic states to the FAQ. 
Other changes: - Update the database scheme info - Delete the Homebrew issue as it has been solved in https://github.com/Homebrew/homebrew-core/pull/225877 - Update default gas limit in: [7cbf7f1](https://github.com/sigp/lighthouse/pull/7679/commits/7cbf7f15164788065328d6848c9034608badbc59) - Updated the binary installation page [8076ca7](https://github.com/sigp/lighthouse/pull/7679/commits/8076ca79059cd312f08c885162ba1edd0c203772) as Lighthouse now supports aarch-apple binary built since v7.1.0 --- book/src/advanced_builders.md | 6 +++--- book/src/faq.md | 35 +++++++++++++++++++++++++++++++ book/src/installation_binaries.md | 3 ++- book/src/installation_homebrew.md | 3 --- wordlist.txt | 1 + 5 files changed, 41 insertions(+), 7 deletions(-) diff --git a/book/src/advanced_builders.md b/book/src/advanced_builders.md index de7d02d956..3beb7c71c4 100644 --- a/book/src/advanced_builders.md +++ b/book/src/advanced_builders.md @@ -114,7 +114,7 @@ Each field is optional. ```json { "builder_proposals": true, - "gas_limit": 30000001 + "gas_limit": 45000001 } ``` @@ -127,7 +127,7 @@ curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf4 -H "Content-Type: application/json" \ -d '{ "builder_proposals": true, - "gas_limit": 30000001 + "gas_limit": 45000001 }' | jq ``` @@ -161,7 +161,7 @@ You can also directly configure these fields in the `validator_definitions.yml` voting_keystore_path: /home/paul/.lighthouse/validators/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007/voting-keystore.json voting_keystore_password_path: /home/paul/.lighthouse/secrets/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007 suggested_fee_recipient: "0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21" - gas_limit: 30000001 + gas_limit: 45000001 builder_proposals: true builder_boost_factor: 50 - enabled: false diff --git a/book/src/faq.md b/book/src/faq.md index 27726e59a5..87ef288900 
100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -14,6 +14,7 @@ - [My beacon node logs `WARN Error signalling fork choice waiter`, what should I do?](#bn-fork-choice) - [My beacon node logs `ERRO Aggregate attestation queue full`, what should I do?](#bn-queue-full) - [My beacon node logs `WARN Failed to finalize deposit cache`, what should I do?](#bn-deposit-cache) +- [How can I construct only partial state history?](#bn-partial-history) ## [Validator](#validator-1) @@ -190,6 +191,40 @@ If the node is syncing or downloading historical blocks, the error should disappear This is a known [bug](https://github.com/sigp/lighthouse/issues/3707) that will fix by itself. +### How can I construct only partial state history? + +Lighthouse prunes finalized states by default. Nevertheless, it is quite often that users may be interested in the state history of a few epochs before finalization. To have access to these pruned states, Lighthouse typically requires a full reconstruction of states using the flag `--reconstruct-historic-states` (which will usually take a week). Partial state history can be achieved with some "tricks". Here are the general steps: + + 1. Delete the current database. You can do so with `--purge-db-force` or manually deleting the database from the data directory: `$datadir/beacon`. + + 1. If you are interested in the states from the current slot and beyond, perform a checkpoint sync with the flag `--reconstruct-historic-states`, then you can skip the following and jump straight to Step 5 to check the database. + + If you are interested in the states before the current slot, identify the slot to perform a manual checkpoint sync. With the default configuration, this slot should be divisible by 2**21, as this is where a full state snapshot is stored. With the flag `--reconstruct-historic-states`, the state upper limit will be adjusted to the next full snapshot slot, a slot that satisfies: `slot % 2**21 == 0`.
In other words, to have the state history available before the current slot, we have to checkpoint sync 2**21 slots before the next full snapshot slot. + + Example: Say the current mainnet is at slot `12000000`. As the next full state snapshot is at slot `12582912`, the slot that we want is slot `10485760`. You can calculate this (in Python) using `12000000 // 2**21 * 2**21`. + + 1. [Export](./advanced_checkpoint_sync.md#manual-checkpoint-sync) the blobs, block and state data for the slot identified in Step 2. This can be done from another beacon node that you have access to, or you could use any available public beacon API, e.g., [QuickNode](https://www.quicknode.com/docs/ethereum). + + 1. Perform a [manual checkpoint sync](./advanced_checkpoint_sync.md#manual-checkpoint-sync) using the data from the previous step, and provide the flag `--reconstruct-historic-states`. + + 1. Check the database: + + ```bash + curl "http://localhost:5052/lighthouse/database/info" | jq '.anchor' + ``` + + and look for the field `state_upper_limit`. It should show the slot of the snapshot: + + ```json + "state_upper_limit": "10485760", + ``` + +Lighthouse will now start to reconstruct historic states from slot `10485760`. At this point, if you do not want a full state reconstruction, you may remove the flag `--reconstruct-historic-states` (and restart). When the process is completed, you will have the state data from slot `10485760`. Going forward, Lighthouse will continue retaining all historical states newer than the snapshot. Eventually this can lead to increased disk usage, which presently can only be reduced by repeating the process starting from a more recent snapshot. + +> Note: You may only be interested in very recent historic states. To do so, you may configure the full snapshot to be, for example, every 2**11 slots, see [database configuration](./advanced_database.md#hierarchical-state-diffs) for more details. 
This can be configured with the flag `--hierarchy-exponents 5,7,11` together with the flag `--reconstruct-historic-states`. This will affect the slot number in Step 2, while other steps remain the same. Note that this comes at the expense of a higher storage requirement. + +> With `--hierarchy-exponents 5,7,11`, using the same example as above, the next full state snapshot is at slot `12001280`. So the slot to checkpoint sync from is: slot `11999232`. + ## Validator ### Can I use redundancy in my staking setup? diff --git a/book/src/installation_binaries.md b/book/src/installation_binaries.md index e3a2bfb8a0..1d5477dcf7 100644 --- a/book/src/installation_binaries.md +++ b/book/src/installation_binaries.md @@ -6,11 +6,12 @@ on Github](https://github.com/sigp/lighthouse/releases). ## Platforms -Binaries are supplied for four platforms: +Binaries are supplied for five platforms: - `x86_64-unknown-linux-gnu`: AMD/Intel 64-bit processors (most desktops, laptops, servers) - `aarch64-unknown-linux-gnu`: 64-bit ARM processors (Raspberry Pi 4) - `x86_64-apple-darwin`: macOS with Intel chips +- `aarch64-apple-darwin`: macOS with ARM chips - `x86_64-windows`: Windows with 64-bit processors ## Usage diff --git a/book/src/installation_homebrew.md b/book/src/installation_homebrew.md index 9d33bfb3eb..f94764889e 100644 --- a/book/src/installation_homebrew.md +++ b/book/src/installation_homebrew.md @@ -5,9 +5,6 @@ Lighthouse is available on Linux and macOS via the [Homebrew package manager](ht Please note that this installation method is maintained by the Homebrew community. It is not officially supported by the Lighthouse team. -> Note: There is a [compilation error](https://github.com/Homebrew/homebrew-core/pull/220922) for Lighthouse v7.0.0 and above that remains unresolved. Users are recommended to download the binary from [the release -page](https://github.com/sigp/lighthouse/releases) or build from source. 
- ## Installation Install the latest version of the [`lighthouse`][formula] formula with: diff --git a/wordlist.txt b/wordlist.txt index ada0384d36..fdb2f43e42 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -75,6 +75,7 @@ Proto PRs Prysm QUIC +QuickNode RasPi README RESTful From 309c30136374cdf5f757d1fafbd3275a3d46ad34 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Sun, 13 Jul 2025 23:42:55 -0700 Subject: [PATCH 33/44] Allow /validator apis to work pre-genesis (#7729) N/A Lighthouse BN http endpoint would return a server error pre-genesis on the `validator/duties/attester` and `validator/prepare_beacon_proposer` because `slot_clock.now()` would return a `None` pre-genesis. The prysm VC depends on the endpoints pre-genesis and was having issues interoping with the lighthouse bn because of this reason. The proposer duties endpoint explicitly handles the pre-genesis case here https://github.com/sigp/lighthouse/blob/538067f1ff9840d44e3c2ea60581e18aba8c4143/beacon_node/http_api/src/proposer_duties.rs#L23-L28 I see no reason why we can't make the other endpoints more flexible to work pre-genesis. This PR handles the pre-genesis case on the attester and prepare_beacon_proposer endpoints as well. Thanks for raising @james-prysm. 
--- beacon_node/http_api/src/attester_duties.rs | 23 +++++++++++++++------ beacon_node/http_api/src/lib.rs | 6 +++++- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 8905b24cde..b42e474b5c 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -16,7 +16,12 @@ pub fn attester_duties( request_indices: &[u64], chain: &BeaconChain, ) -> Result { - let current_epoch = chain.epoch().map_err(warp_utils::reject::unhandled_error)?; + let current_epoch = chain + .slot_clock + .now_or_genesis() + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::unhandled_error)?; // Determine what the current epoch would be if we fast-forward our system clock by // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. @@ -24,11 +29,17 @@ pub fn attester_duties( // Most of the time, `tolerant_current_epoch` will be equal to `current_epoch`. However, during // the first `MAXIMUM_GOSSIP_CLOCK_DISPARITY` duration of the epoch `tolerant_current_epoch` // will equal `current_epoch + 1` - let tolerant_current_epoch = chain - .slot_clock - .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) - .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? - .epoch(T::EthSpec::slots_per_epoch()); + let tolerant_current_epoch = if chain.slot_clock.is_prior_to_genesis().unwrap_or(true) { + current_epoch + } else { + chain + .slot_clock + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) + .ok_or_else(|| { + warp_utils::reject::custom_server_error("unable to read slot clock".into()) + })? 
+ .epoch(T::EthSpec::slots_per_epoch()) + }; if request_epoch == current_epoch || request_epoch == current_epoch + 1 diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 2db93c0033..cacdd4a44c 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3790,7 +3790,11 @@ pub fn serve( .ok_or(BeaconChainError::ExecutionLayerMissing) .map_err(warp_utils::reject::unhandled_error)?; - let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::unhandled_error)?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); debug!( From d6de8a7484777f3653dcfdd4b3b7c26d43c127ca Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 17 Jul 2025 09:50:28 +0200 Subject: [PATCH 34/44] Add additional broadcast validation tests for Fulu/PeerDAS (#7325) Closes #6855 Add PeerDAS broadcast validation tests and fix a small bug where `sampling_columns_indices` is none (indicating that we've already sampled the necessary columns) and `process_gossip_data_columns` gets called --- .github/workflows/test-suite.yml | 23 ++++++ Makefile | 18 ++++- beacon_node/http_api/src/publish_blocks.rs | 34 ++++----- .../tests/broadcast_validation_tests.rs | 70 ++++++++++++------- 4 files changed, 102 insertions(+), 43 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index d86abd0721..ac496f1f20 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -159,6 +159,28 @@ jobs: - name: Show cache stats if: env.SELF_HOSTED_RUNNERS == 'true' run: sccache --show-stats + http-api-tests: + name: http-api-tests + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' + # Use self-hosted runners only on the sigp repo. 
+ runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v4 + - name: Get latest version of stable Rust + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run http_api tests for all recent forks + run: make test-http-api + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats op-pool-tests: name: op-pool-tests needs: [check-labels] @@ -475,6 +497,7 @@ jobs: 'op-pool-tests', 'network-tests', 'slasher-tests', + 'http-api-tests', 'debug-tests-ubuntu', 'state-transition-vectors-ubuntu', 'ef-tests-ubuntu', diff --git a/Makefile b/Makefile index 75b6811b74..a2d07a87ee 100644 --- a/Makefile +++ b/Makefile @@ -34,6 +34,9 @@ PROFILE ?= release # they run for different forks. FORKS=phase0 altair bellatrix capella deneb electra fulu +# List of all recent hard forks. This list is used to set env variables for http_api tests +RECENT_FORKS=electra fulu + # Extra flags for Cargo CARGO_INSTALL_EXTRA_FLAGS?= @@ -141,24 +144,26 @@ build-release-tarballs: test-release: cargo test --workspace --release --features "$(TEST_FEATURES)" \ --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network + --exclude http_api # Runs the full workspace tests in **release**, without downloading any additional # test vectors, using nextest. nextest-release: cargo nextest run --workspace --release --features "$(TEST_FEATURES)" \ - --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network + --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network \ + --exclude http_api # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. 
test-debug: cargo test --workspace --features "$(TEST_FEATURES)" \ - --exclude ef_tests --exclude beacon_chain --exclude network + --exclude ef_tests --exclude beacon_chain --exclude network --exclude http_api # Runs the full workspace tests in **debug**, without downloading any additional test # vectors, using nextest. nextest-debug: cargo nextest run --workspace --features "$(TEST_FEATURES)" \ - --exclude ef_tests --exclude beacon_chain --exclude network + --exclude ef_tests --exclude beacon_chain --exclude network --exclude http_api # Runs cargo-fmt (linter). cargo-fmt: @@ -188,6 +193,13 @@ test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain +# Run the tests in the `http_api` crate for all recent forks. +test-http-api: $(patsubst %,test-http-api-%,$(RECENT_FORKS)) + +test-http-api-%: + env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p http_api + + # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 75979bbb1d..5d581859ae 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -228,23 +228,25 @@ pub async fn publish_block>( .into_iter() .flatten() .filter(|data_column| sampling_columns_indices.contains(&data_column.index())) - .collect(); + .collect::>(); - // Importing the columns could trigger block import and network publication in the case - // where the block was already seen on gossip. 
- if let Err(e) = - Box::pin(chain.process_gossip_data_columns(sampling_columns, publish_fn)).await - { - let msg = format!("Invalid data column: {e}"); - return if let BroadcastValidation::Gossip = validation_level { - Err(warp_utils::reject::broadcast_without_import(msg)) - } else { - error!( - reason = &msg, - "Invalid data column during block publication" - ); - Err(warp_utils::reject::custom_bad_request(msg)) - }; + if !sampling_columns.is_empty() { + // Importing the columns could trigger block import and network publication in the case + // where the block was already seen on gossip. + if let Err(e) = + Box::pin(chain.process_gossip_data_columns(sampling_columns, publish_fn)).await + { + let msg = format!("Invalid data column: {e}"); + return if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(msg)) + } else { + error!( + reason = &msg, + "Invalid data column during block publication" + ); + Err(warp_utils::reject::custom_bad_request(msg)) + }; + } } } diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 28b81c2bda..27831b3a23 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,3 +1,4 @@ +use beacon_chain::test_utils::test_spec; use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, GossipVerifiedBlock, IntoGossipVerifiedBlock, @@ -571,7 +572,8 @@ pub async fn equivocation_gossip() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let tester = InteractiveTester::::new(None, validator_count).await; + let spec = test_spec::(); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. 
tester.harness.advance_slot(); @@ -1359,18 +1361,22 @@ pub async fn blinded_equivocation_full_pass() { .block_is_known_to_fork_choice(&block.canonical_root())); } -/// This test checks that an HTTP POST request with the block & blobs succeeds with a 200 response -/// even if the block has already been seen on gossip without any blobs. +/// This test checks that an HTTP POST request with the block & blobs/columns succeeds with a 200 response +/// even if the block has already been seen on gossip without any blobs/columns. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -pub async fn block_seen_on_gossip_without_blobs() { +pub async fn block_seen_on_gossip_without_blobs_or_columns() { let validation_level: Option = Some(BroadcastValidation::Gossip); // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); - let tester = InteractiveTester::::new(Some(spec), validator_count).await; + let tester = InteractiveTester::::new(None, validator_count).await; + let state = tester.harness.get_current_state(); + let fork_name = state.fork_name(&tester.harness.spec).unwrap(); + if !fork_name.deneb_enabled() { + return; + } // Create some chain depth. tester.harness.advance_slot(); @@ -1421,18 +1427,22 @@ pub async fn block_seen_on_gossip_without_blobs() { .block_is_known_to_fork_choice(&block.canonical_root())); } -/// This test checks that an HTTP POST request with the block & blobs succeeds with a 200 response -/// even if the block has already been seen on gossip without all blobs. +/// This test checks that an HTTP POST request with the block & blobs/columns succeeds with a 200 response +/// even if the block has already been seen on gossip without all blobs/columns. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -pub async fn block_seen_on_gossip_with_some_blobs() { +pub async fn block_seen_on_gossip_with_some_blobs_or_columns() { let validation_level: Option = Some(BroadcastValidation::Gossip); // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); - let tester = InteractiveTester::::new(Some(spec), validator_count).await; + let tester = InteractiveTester::::new(None, validator_count).await; + let state = tester.harness.get_current_state(); + let fork_name = state.fork_name(&tester.harness.spec).unwrap(); + if !fork_name.deneb_enabled() { + return; + } // Create some chain depth. tester.harness.advance_slot(); @@ -1501,18 +1511,23 @@ pub async fn block_seen_on_gossip_with_some_blobs() { .block_is_known_to_fork_choice(&block.canonical_root())); } -/// This test checks that an HTTP POST request with the block & blobs succeeds with a 200 response -/// even if the blobs have already been seen on gossip. +/// This test checks that an HTTP POST request with the block & blobs/columns succeeds with a 200 response +/// even if the blobs/columns have already been seen on gossip. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -pub async fn blobs_seen_on_gossip_without_block() { +pub async fn blobs_or_columns_seen_on_gossip_without_block() { + let spec = test_spec::(); let validation_level: Option = Some(BroadcastValidation::Gossip); // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. 
let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); - let tester = InteractiveTester::::new(Some(spec), validator_count).await; + let tester = InteractiveTester::::new(Some(spec.clone()), validator_count).await; + let state = tester.harness.get_current_state(); + let fork_name = state.fork_name(&tester.harness.spec).unwrap(); + if !fork_name.deneb_enabled() { + return; + } // Create some chain depth. tester.harness.advance_slot(); @@ -1570,15 +1585,19 @@ pub async fn blobs_seen_on_gossip_without_block() { /// This test checks that an HTTP POST request with the block succeeds with a 200 response /// if just the blobs have already been seen on gossip. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { +async fn blobs_or_columns_seen_on_gossip_without_block_and_no_http_blobs_or_columns() { let validation_level: Option = Some(BroadcastValidation::Gossip); // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); - let tester = InteractiveTester::::new(Some(spec), validator_count).await; + let tester = InteractiveTester::::new(None, validator_count).await; + let state = tester.harness.get_current_state(); + let fork_name = state.fork_name(&tester.harness.spec).unwrap(); + if !fork_name.deneb_enabled() { + return; + } // Create some chain depth. 
tester.harness.advance_slot(); @@ -1638,7 +1657,7 @@ pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -pub async fn slashable_blobs_seen_on_gossip_cause_failure() { +async fn slashable_blobs_or_columns_seen_on_gossip_cause_failure() { let validation_level: Option = Some(BroadcastValidation::ConsensusAndEquivocation); @@ -1646,8 +1665,12 @@ pub async fn slashable_blobs_seen_on_gossip_cause_failure() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); - let tester = InteractiveTester::::new(Some(spec), validator_count).await; + let tester = InteractiveTester::::new(None, validator_count).await; + let state = tester.harness.get_current_state(); + let fork_name = state.fork_name(&tester.harness.spec).unwrap(); + if !fork_name.deneb_enabled() { + return; + } // Create some chain depth. tester.harness.advance_slot(); @@ -1714,10 +1737,9 @@ pub async fn duplicate_block_status_code() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let duplicate_block_status_code = StatusCode::IM_A_TEAPOT; let tester = InteractiveTester::::new_with_initializer_and_mutator( - Some(spec), + None, validator_count, None, None, From 3f06e5dfbac1e280a8411362079b9356628e5e7a Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 17 Jul 2025 23:51:11 -0500 Subject: [PATCH 35/44] Fix enr loading from disk with cgc (#7754) N/A During building an enr on startup, we weren't using the value in the custody context. This was resulting in the enr value getting updated when the cgc updates, the change getting persisted, but getting set back to the default on restart. This PR takes the value explicitly from the custody context. 
--- .../lighthouse_network/src/discovery/enr.rs | 56 ++++++++++++++----- .../lighthouse_network/src/discovery/mod.rs | 1 + .../lighthouse_network/src/service/mod.rs | 9 ++- lcli/src/generate_bootnode_enr.rs | 11 +++- 4 files changed, 57 insertions(+), 20 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 4c05560497..053527f119 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -159,6 +159,7 @@ pub fn build_or_load_enr( local_key: Keypair, config: &NetworkConfig, enr_fork_id: &EnrForkId, + custody_group_count: Option, next_fork_digest: [u8; 4], spec: &ChainSpec, ) -> Result { @@ -166,7 +167,14 @@ pub fn build_or_load_enr( // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. let enr_key = CombinedKey::from_libp2p(local_key)?; - let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, next_fork_digest, spec)?; + let mut local_enr = build_enr::( + &enr_key, + config, + enr_fork_id, + custody_group_count, + next_fork_digest, + spec, + )?; use_or_load_enr(&enr_key, &mut local_enr, config)?; Ok(local_enr) @@ -177,6 +185,7 @@ pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, enr_fork_id: &EnrForkId, + custody_group_count: Option, next_fork_digest: [u8; 4], spec: &ChainSpec, ) -> Result { @@ -271,14 +280,15 @@ pub fn build_enr( // only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled if spec.is_peer_das_scheduled() { - let custody_group_count = - if let Some(false_cgc) = config.advertise_false_custody_group_count { - false_cgc - } else if config.subscribe_all_data_column_subnets { - spec.number_of_custody_groups - } else { - spec.custody_requirement - }; + let custody_group_count = if let Some(cgc) = custody_group_count { + cgc + } else if let Some(false_cgc) = 
config.advertise_false_custody_group_count { + false_cgc + } else if config.subscribe_all_data_column_subnets { + spec.number_of_custody_groups + } else { + spec.custody_requirement + }; builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count); builder.add_value(NEXT_FORK_DIGEST_ENR_KEY, &next_fork_digest); } @@ -361,18 +371,22 @@ mod test { spec } - fn build_enr_with_config(config: NetworkConfig, spec: &ChainSpec) -> (Enr, CombinedKey) { + fn build_enr_with_config( + config: NetworkConfig, + cgc: Option, + spec: &ChainSpec, + ) -> (Enr, CombinedKey) { let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key = CombinedKey::from_secp256k1(&keypair); let enr_fork_id = EnrForkId::default(); - let enr = build_enr::(&enr_key, &config, &enr_fork_id, TEST_NFD, spec).unwrap(); + let enr = build_enr::(&enr_key, &config, &enr_fork_id, cgc, TEST_NFD, spec).unwrap(); (enr, enr_key) } #[test] fn test_nfd_enr_encoding() { let spec = make_fulu_spec(); - let enr = build_enr_with_config(NetworkConfig::default(), &spec).0; + let enr = build_enr_with_config(NetworkConfig::default(), None, &spec).0; assert_eq!(enr.next_fork_digest().unwrap(), TEST_NFD); } @@ -384,7 +398,7 @@ mod test { }; let spec = make_fulu_spec(); - let enr = build_enr_with_config(config, &spec).0; + let enr = build_enr_with_config(config, None, &spec).0; assert_eq!( enr.custody_group_count::(&spec).unwrap(), @@ -399,7 +413,7 @@ mod test { ..NetworkConfig::default() }; let spec = make_fulu_spec(); - let enr = build_enr_with_config(config, &spec).0; + let enr = build_enr_with_config(config, None, &spec).0; assert_eq!( enr.custody_group_count::(&spec).unwrap(), @@ -407,9 +421,21 @@ mod test { ); } + #[test] + fn custody_group_value() { + let config = NetworkConfig { + subscribe_all_data_column_subnets: true, + ..NetworkConfig::default() + }; + let spec = make_fulu_spec(); + let enr = build_enr_with_config(config, Some(42), &spec).0; + + 
assert_eq!(enr.custody_group_count::(&spec).unwrap(), 42); + } + #[test] fn test_encode_decode_eth2_enr() { - let (enr, _key) = build_enr_with_config(NetworkConfig::default(), &E::default_spec()); + let (enr, _key) = build_enr_with_config(NetworkConfig::default(), None, &E::default_spec()); // Check all Eth2 Mappings are decodeable enr.eth2().unwrap(); enr.attestation_bitfield::().unwrap(); diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index df866dfc64..bc7802ce9a 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1235,6 +1235,7 @@ mod tests { &enr_key, &config, &EnrForkId::default(), + None, next_fork_digest, &spec, ) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index a880fdb3e7..4a6f34c76d 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -197,18 +197,21 @@ impl Network { .fork_context .next_fork_digest() .unwrap_or_else(|| ctx.fork_context.current_fork_digest()); + + let advertised_cgc = config + .advertise_false_custody_group_count + .unwrap_or(custody_group_count); let enr = crate::discovery::enr::build_or_load_enr::( local_keypair.clone(), &config, &ctx.enr_fork_id, + Some(advertised_cgc), next_fork_digest, &ctx.chain_spec, )?; // Construct the metadata - let advertised_cgc = config - .advertise_false_custody_group_count - .unwrap_or(custody_group_count); + let meta_data = utils::load_or_build_metadata(&config.network_dir, advertised_cgc); let seq_number = *meta_data.seq_number(); let globals = NetworkGlobals::new( diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 98ef0b96d4..b2fd7e7ec7 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -38,8 +38,15 @@ pub fn run(matches: &ArgMatches, spec: 
&ChainSpec) -> Result<(), Str next_fork_version: genesis_fork_version, next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH }; - let enr = build_enr::(&enr_key, &config, &enr_fork_id, genesis_fork_digest, spec) - .map_err(|e| format!("Unable to create ENR: {:?}", e))?; + let enr = build_enr::( + &enr_key, + &config, + &enr_fork_id, + None, + genesis_fork_digest, + spec, + ) + .map_err(|e| format!("Unable to create ENR: {:?}", e))?; fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; From 1046dfbfe79f37fe513afed70e2400d550835130 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 18 Jul 2025 00:36:18 -0500 Subject: [PATCH 36/44] Serialize bpo schedule in ascending order (#7753) N/A Serializes the blob_schedule in ascending order to match other clients. This is needed to keep the output of `eth/v1/config/spec` http endpoint consistent across clients. cc @barnabasbusa --- consensus/types/src/chain_spec.rs | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 4476cd69b3..49537073b4 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1469,7 +1469,7 @@ pub struct BlobParameters { // A wrapper around a vector of BlobParameters to ensure that the vector is reverse // sorted by epoch. 
-#[derive(arbitrary::Arbitrary, Serialize, Debug, PartialEq, Clone)] +#[derive(arbitrary::Arbitrary, Debug, PartialEq, Clone)] pub struct BlobSchedule(Vec); impl<'de> Deserialize<'de> for BlobSchedule { @@ -1513,6 +1513,18 @@ impl BlobSchedule { } } +impl Serialize for BlobSchedule { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut schedule = self.0.clone(); + // reversing the list to get an ascending order + schedule.reverse(); + schedule.serialize(serializer) + } +} + impl<'a> IntoIterator for &'a BlobSchedule { type Item = &'a BlobParameters; type IntoIter = std::slice::Iter<'a, BlobParameters>; @@ -2620,6 +2632,19 @@ mod yaml_tests { default_max_blobs_per_block_electra() ); assert_eq!(spec.max_blobs_per_block_within_fork(ForkName::Fulu), 20); + + // Check that serialization is in ascending order + let yaml = serde_yaml::to_string(&spec.blob_schedule).expect("should serialize"); + + // Deserialize back to Vec to check order + let deserialized: Vec = + serde_yaml::from_str(&yaml).expect("should deserialize"); + + // Should be in ascending order by epoch + assert!( + deserialized.iter().map(|bp| bp.epoch.as_u64()).is_sorted(), + "BlobSchedule should serialize in ascending order by epoch" + ); } #[test] From 4a3e248b7e6362b656dfd22d9e4a5303fa7cfe7e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Jul 2025 12:11:27 +1000 Subject: [PATCH 37/44] Add heaptrack support (#7764) Although we're working on jemalloc profiler support in https://github.com/sigp/lighthouse/pull/7746, heaptrack seems to be producing more sensible results. This PR adds a heaptrack profile and a heaptrack feature so that we no longer need to patch the code in order to use heaptrack. This may prove complementary to jemalloc profiling, so I think there is no harm in having both. 
--- Cargo.toml | 4 ++++ lighthouse/Cargo.toml | 12 +++++++----- lighthouse/src/main.rs | 4 ++-- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 817c2f2d80..6737ff22c5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -289,5 +289,9 @@ panic = "abort" codegen-units = 1 overflow-checks = true +[profile.release-debug] +inherits = "release" +debug = true + [patch.crates-io] quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "681f413312404ab6e51f0b46f39b0075c6f4ebfd" } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 6a8fa00c1e..a6549f5574 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -35,8 +35,10 @@ beacon-node-leveldb = ["store/leveldb"] beacon-node-redb = ["store/redb"] # Supports console subscriber for debugging console-subscriber = ["console-subscriber/default"] +# Turns off jemalloc so that heaptrack may be used to analyse memory usage. +heaptrack = [] -# Deprecated. This is now enabled by default on non windows targets. +# Deprecated. This is now enabled by default on non windows targets (unless heaptrack is enabled). 
jemalloc = [] [dependencies] @@ -71,12 +73,12 @@ unused_port = { workspace = true } validator_client = { workspace = true } validator_manager = { path = "../validator_manager" } -[target.'cfg(not(target_os = "windows"))'.dependencies] -malloc_utils = { workspace = true, features = ["jemalloc"] } - -[target.'cfg(target_os = "windows")'.dependencies] +[target.'cfg(any(target_os = "windows", features = "heaptrack"))'.dependencies] malloc_utils = { workspace = true } +[target.'cfg(not(any(target_os = "windows", features = "heaptrack")))'.dependencies] +malloc_utils = { workspace = true, features = ["jemalloc"] } + [dev-dependencies] beacon_node_fallback = { workspace = true } beacon_processor = { workspace = true } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 10168d026f..28b8c7b8fc 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -74,11 +74,11 @@ fn bls_hardware_acceleration() -> bool { } fn allocator_name() -> String { - #[cfg(target_os = "windows")] + #[cfg(any(feature = "heaptrack", target_os = "windows"))] { "system".to_string() } - #[cfg(not(target_os = "windows"))] + #[cfg(not(any(feature = "heaptrack", target_os = "windows")))] match malloc_utils::jemalloc::page_size() { Ok(page_size) => format!("jemalloc ({}K)", page_size / 1024), Err(e) => format!("jemalloc (error: {e:?})"), From b48879a56620afb4060dc927d8dc16ab8001dd53 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 22 Jul 2025 20:48:49 +1000 Subject: [PATCH 38/44] Remove KZG verification from local block production and blobs fetched from the EL (#7713) #7700 As described in title, the EL already performs KZG verification on all blobs when they entered the mempool, so it's redundant to perform extra validation on blobs returned from the EL. This PR removes - KZG verification for both blobs and data columns during block production - KZG verification for data columns after fetch engine blobs call. 
I have not done this for blobs because it requires extra changes to check the observed cache, and doesn't feel like it's a worthy optimisation given the number of blobs per block. This PR does not remove KZG verification on the block publishing path yet. --- beacon_node/beacon_chain/src/beacon_chain.rs | 32 +--------------- .../src/data_column_verification.rs | 6 +++ .../fetch_blobs/fetch_blobs_beacon_adapter.rs | 14 ++----- .../beacon_chain/src/fetch_blobs/mod.rs | 37 +++++++++---------- .../beacon_chain/src/fetch_blobs/tests.rs | 14 ++----- beacon_node/beacon_chain/src/metrics.rs | 9 ----- 6 files changed, 32 insertions(+), 80 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 01075ae4a4..8db432bbec 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -70,7 +70,7 @@ use crate::validator_monitor::{ }; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ - kzg_utils, metrics, AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, + metrics, AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead, }; use eth2::types::{ @@ -5748,8 +5748,6 @@ impl BeaconChain { let (mut block, _) = block.deconstruct(); *block.state_root_mut() = state_root; - let blobs_verification_timer = - metrics::start_timer(&metrics::BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES); let blob_items = match maybe_blobs_and_proofs { Some((blobs, proofs)) => { let expected_kzg_commitments = @@ -5768,37 +5766,11 @@ impl BeaconChain { ))); } - let kzg_proofs = Vec::from(proofs); - - let kzg = self.kzg.as_ref(); - if self - .spec - .is_peer_das_enabled_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())) - { - kzg_utils::validate_blobs_and_cell_proofs::( - kzg, - blobs.iter().collect(), - &kzg_proofs, - expected_kzg_commitments, - ) - .map_err(BlockProductionError::KzgError)?; - } else { - 
kzg_utils::validate_blobs::( - kzg, - expected_kzg_commitments, - blobs.iter().collect(), - &kzg_proofs, - ) - .map_err(BlockProductionError::KzgError)?; - } - - Some((kzg_proofs.into(), blobs)) + Some((proofs, blobs)) } None => None, }; - drop(blobs_verification_timer); - metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); trace!( diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 3009522bf6..e079b5ab78 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -266,6 +266,12 @@ impl KzgVerifiedDataColumn { verify_kzg_for_data_column(data_column, kzg) } + /// Mark a data column as KZG verified. Caller must ONLY use this on columns constructed + /// from EL blobs. + pub fn from_execution_verified(data_column: Arc>) -> Self { + Self { data: data_column } + } + /// Create a `KzgVerifiedDataColumn` from `DataColumnSidecar` for testing ONLY. 
pub(crate) fn __new_for_testing(data_column: Arc>) -> Self { Self { data: data_column } diff --git a/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs b/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs index 4a7a5aeea2..fe8af5b70e 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs @@ -1,17 +1,16 @@ use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; -use crate::data_column_verification::KzgVerifiedDataColumn; use crate::fetch_blobs::{EngineGetBlobsOutput, FetchEngineBlobError}; use crate::observed_block_producers::ProposalKey; use crate::observed_data_sidecars::DoNotObserve; use crate::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes}; use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2}; -use kzg::{Error as KzgError, Kzg}; +use kzg::Kzg; #[cfg(test)] use mockall::automock; use std::collections::HashSet; use std::sync::Arc; use task_executor::TaskExecutor; -use types::{BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, Hash256, Slot}; +use types::{BlobSidecar, ChainSpec, ColumnIndex, Hash256, Slot}; /// An adapter to the `BeaconChain` functionalities to remove `BeaconChain` from direct dependency to enable testing fetch blobs logic. 
pub(crate) struct FetchBlobsBeaconAdapter { @@ -77,14 +76,7 @@ impl FetchBlobsBeaconAdapter { GossipVerifiedBlob::::new(blob.clone(), blob.index, &self.chain) } - pub(crate) fn verify_data_columns_kzg( - &self, - data_columns: Vec>>, - ) -> Result>, KzgError> { - KzgVerifiedDataColumn::from_batch(data_columns, &self.chain.kzg) - } - - pub(crate) fn known_for_proposal( + pub(crate) fn data_column_known_for_proposal( &self, proposal_key: ProposalKey, ) -> Option> { diff --git a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs index e02405ddba..bf4409fbb9 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs @@ -14,7 +14,7 @@ mod tests; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_verification_types::AsBlock; -use crate::data_column_verification::KzgVerifiedCustodyDataColumn; +use crate::data_column_verification::{KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn}; #[cfg_attr(test, double)] use crate::fetch_blobs::fetch_blobs_beacon_adapter::FetchBlobsBeaconAdapter; use crate::kzg_utils::blobs_to_data_column_sidecars; @@ -311,6 +311,9 @@ async fn fetch_and_process_blobs_v2( return Ok(None); } + // Up until this point we have not observed the data columns in the gossip cache, which allows + // them to arrive independently while this function is running. In publish_fn we will observe + // them and then publish any columns that had not already been observed. publish_fn(EngineGetBlobsOutput::CustodyColumns( custody_columns_to_import.clone(), )); @@ -358,17 +361,24 @@ async fn compute_custody_columns_to_import( // `DataAvailabilityChecker` requires a strict match on custody columns count to // consider a block available. 
let mut custody_columns = data_columns_result - .map(|mut data_columns| { - data_columns.retain(|col| custody_columns_indices.contains(&col.index)); + .map(|data_columns| { data_columns + .into_iter() + .filter(|col| custody_columns_indices.contains(&col.index)) + .map(|col| { + KzgVerifiedCustodyDataColumn::from_asserted_custody( + KzgVerifiedDataColumn::from_execution_verified(col), + ) + }) + .collect::>() }) .map_err(FetchEngineBlobError::DataColumnSidecarError)?; // Only consider columns that are not already observed on gossip. - if let Some(observed_columns) = chain_adapter_cloned.known_for_proposal( + if let Some(observed_columns) = chain_adapter_cloned.data_column_known_for_proposal( ProposalKey::new(block.message().proposer_index(), block.slot()), ) { - custody_columns.retain(|col| !observed_columns.contains(&col.index)); + custody_columns.retain(|col| !observed_columns.contains(&col.index())); if custody_columns.is_empty() { return Ok(vec![]); } @@ -378,26 +388,13 @@ async fn compute_custody_columns_to_import( if let Some(known_columns) = chain_adapter_cloned.cached_data_column_indexes(&block_root) { - custody_columns.retain(|col| !known_columns.contains(&col.index)); + custody_columns.retain(|col| !known_columns.contains(&col.index())); if custody_columns.is_empty() { return Ok(vec![]); } } - // KZG verify data columns before publishing. This prevents blobs with invalid - // KZG proofs from the EL making it into the data availability checker. We do not - // immediately add these blobs to the observed blobs/columns cache because we want - // to allow blobs/columns to arrive on gossip and be accepted (and propagated) while - // we are waiting to publish. Just before publishing we will observe the blobs/columns - // and only proceed with publishing if they are not yet seen. 
- let verified = chain_adapter_cloned - .verify_data_columns_kzg(custody_columns) - .map_err(FetchEngineBlobError::KzgError)?; - - Ok(verified - .into_iter() - .map(KzgVerifiedCustodyDataColumn::from_asserted_custody) - .collect()) + Ok(custody_columns) }, "compute_custody_columns_to_import", ) diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs index 3178020c75..9cb597c6df 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs @@ -1,4 +1,3 @@ -use crate::data_column_verification::KzgVerifiedDataColumn; use crate::fetch_blobs::fetch_blobs_beacon_adapter::MockFetchBlobsBeaconAdapter; use crate::fetch_blobs::{ fetch_and_process_engine_blobs_inner, EngineGetBlobsOutput, FetchEngineBlobError, @@ -156,7 +155,7 @@ mod get_blobs_v2 { mock_fork_choice_contains_block(&mut mock_adapter, vec![]); // All data columns already seen on gossip mock_adapter - .expect_known_for_proposal() + .expect_data_column_known_for_proposal() .returning(|_| Some(hashset![0, 1, 2])); // No blobs should be processed mock_adapter.expect_process_engine_blobs().times(0); @@ -192,17 +191,12 @@ mod get_blobs_v2 { // All blobs returned, fork choice doesn't contain block mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); mock_fork_choice_contains_block(&mut mock_adapter, vec![]); - mock_adapter.expect_known_for_proposal().returning(|_| None); + mock_adapter + .expect_data_column_known_for_proposal() + .returning(|_| None); mock_adapter .expect_cached_data_column_indexes() .returning(|_| None); - mock_adapter - .expect_verify_data_columns_kzg() - .returning(|c| { - Ok(c.into_iter() - .map(KzgVerifiedDataColumn::__new_for_testing) - .collect()) - }); mock_process_engine_blobs_result( &mut mock_adapter, Ok(AvailabilityProcessingStatus::Imported(block_root)), diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 
23d7a1542d..1ba4015281 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1828,15 +1828,6 @@ pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> ) }); -pub static BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: LazyLock> = LazyLock::new( - || { - try_create_histogram( - "beacon_block_production_blobs_verification_seconds", - "Time taken to verify blobs against commitments and creating BlobSidecar objects in block production" - ) - }, -); - /* * Data Availability cache metrics */ From db8b6be9df1729a0ef6aefd67e2785edec7a4503 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Tue, 22 Jul 2025 15:30:30 +0200 Subject: [PATCH 39/44] Data column custody info (#7648) #7647 Introduces a new record in the blobs db `DataColumnCustodyInfo` When `DataColumnCustodyInfo` exists in the db this indicates that a recent cgc change has occurred and/or that a custody backfill sync is currently in progress (custody backfill will be added as a separate PR). When a cgc change has occurred `earliest_available_slot` will be equal to the slot at which the cgc change occurred. During custody backfill sync `earliest_available_slot` should be updated incrementally as it progresses. ~~Note that if `advertise_false_custody_group_count` is enabled we do not add a `DataColumnCustodyInfo` record in the db as that would affect the status v2 response.~~ (See comment https://github.com/sigp/lighthouse/pull/7648#discussion_r2212403389) ~~If `DataColumnCustodyInfo` doesn't exist in the db this indicates that we have fulfilled our custody requirements up to the DA window.~~ (It now always exists, and the slot will be set to `None` once backfill is complete) StatusV2 now uses `DataColumnCustodyInfo` to calculate the `earliest_available_slot` if a `DataColumnCustodyInfo` record exists in the db, if it's `None`, then we return the `oldest_block_slot`.
--- beacon_node/beacon_chain/src/beacon_chain.rs | 9 +++ beacon_node/beacon_chain/src/schema_change.rs | 12 ++++ .../src/schema_change/migration_schema_v27.rs | 26 +++++++++ .../beacon_chain/src/validator_custody.rs | 2 + .../beacon_chain/tests/schema_stability.rs | 32 ++++++++-- beacon_node/beacon_chain/tests/store_tests.rs | 6 +- beacon_node/http_api/src/lib.rs | 7 ++- beacon_node/network/src/status.rs | 16 ++++- beacon_node/store/src/hot_cold_store.rs | 58 +++++++++++++++++-- beacon_node/store/src/lib.rs | 3 + beacon_node/store/src/metadata.rs | 27 ++++++++- 11 files changed, 185 insertions(+), 13 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8db432bbec..6397960682 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6807,6 +6807,15 @@ impl BeaconChain { .map(|duration| (next_digest_epoch, duration)) } + /// Update data column custody info with the slot at which cgc was changed. + pub fn update_data_column_custody_info(&self, slot: Option) { + self.store + .put_data_column_custody_info(slot) + .unwrap_or_else( + |e| tracing::error!(error = ?e, "Failed to update data column custody info"), + ); + } + /// This method serves to get a sense of the current chain health. It is used in block proposal /// to determine whether we should outsource payload production duties. 
/// diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 317b89cbdd..15c9498e1c 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -3,6 +3,7 @@ mod migration_schema_v23; mod migration_schema_v24; mod migration_schema_v25; mod migration_schema_v26; +mod migration_schema_v27; use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; @@ -67,6 +68,17 @@ pub fn migrate_schema( let ops = migration_schema_v26::downgrade_from_v26::(db.clone())?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(26), SchemaVersion(27)) => { + // This migration updates the blobs db. The schema version + // is bumped inside upgrade_to_v27. + migration_schema_v27::upgrade_to_v27::(db.clone()) + } + (SchemaVersion(27), SchemaVersion(26)) => { + // Downgrading is essentially a no-op and is only possible + // if peer das isn't scheduled. + migration_schema_v27::downgrade_from_v27::(db.clone())?; + db.store_schema_version_atomically(to, vec![]) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs new file mode 100644 index 0000000000..6275b1c5be --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs @@ -0,0 +1,26 @@ +use crate::BeaconChainTypes; +use std::sync::Arc; +use store::{metadata::SchemaVersion, Error, HotColdDB}; + +/// Add `DataColumnCustodyInfo` entry to v27. 
+pub fn upgrade_to_v27( + db: Arc>, +) -> Result<(), Error> { + if db.spec.is_peer_das_scheduled() { + db.put_data_column_custody_info(None)?; + db.store_schema_version_atomically(SchemaVersion(27), vec![])?; + } + + Ok(()) +} + +pub fn downgrade_from_v27( + db: Arc>, +) -> Result<(), Error> { + if db.spec.is_peer_das_scheduled() { + return Err(Error::MigrationError( + "Cannot downgrade from v27 if peerDAS is scheduled".to_string(), + )); + } + Ok(()) +} diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs index 7dc5b18ae4..4224125a2a 100644 --- a/beacon_node/beacon_chain/src/validator_custody.rs +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -217,6 +217,7 @@ impl CustodyContext { new_custody_group_count: updated_cgc, sampling_count: self .num_of_custody_groups_to_sample(Some(effective_epoch), spec), + effective_epoch, }); } } @@ -287,6 +288,7 @@ impl CustodyContext { pub struct CustodyCountChanged { pub new_custody_group_count: u64, pub sampling_count: u64, + pub effective_epoch: Epoch, } /// The custody information that gets persisted across runs. 
diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index fc37a1159b..1d12fc878e 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -9,12 +9,14 @@ use operation_pool::PersistedOperationPool; use ssz::Encode; use std::sync::{Arc, LazyLock}; use store::{ - database::interface::BeaconNodeBackend, hot_cold_store::Split, metadata::DataColumnInfo, + database::interface::BeaconNodeBackend, + hot_cold_store::Split, + metadata::{DataColumnCustodyInfo, DataColumnInfo}, DBColumn, HotColdDB, StoreConfig, StoreItem, }; use strum::IntoEnumIterator; use tempfile::{tempdir, TempDir}; -use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec}; +use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec, Slot}; type E = MainnetEthSpec; type Store = Arc, BeaconNodeBackend>>; @@ -84,11 +86,13 @@ async fn schema_stability() { chain.persist_op_pool().unwrap(); chain.persist_custody_context().unwrap(); + insert_data_column_custody_info(&store, &harness.spec); check_db_columns(); check_metadata_sizes(&store); check_op_pool(&store); check_custody_context(&store, &harness.spec); + check_custody_info(&store, &harness.spec); check_persisted_chain(&store); // Not covered here: @@ -100,13 +104,21 @@ async fn schema_stability() { fn check_db_columns() { let current_columns: Vec<&'static str> = DBColumn::iter().map(|c| c.as_str()).collect(); let expected_columns = vec![ - "bma", "blk", "blb", "bdc", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs", "bst", - "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr", "brm", - "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", + "bma", "blk", "blb", "bdc", "bdi", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs", + "bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr", + "brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", 
"dmy", ]; assert_eq!(expected_columns, current_columns); } +fn insert_data_column_custody_info(store: &Store, spec: &ChainSpec) { + if spec.is_peer_das_scheduled() { + store + .put_data_column_custody_info(Some(Slot::new(0))) + .unwrap(); + } +} + /// Check the SSZ sizes of known on-disk metadata. /// /// New types can be added here as the schema evolves. @@ -122,6 +134,7 @@ fn check_metadata_sizes(store: &Store) { } ); assert_eq!(DataColumnInfo::default().ssz_bytes_len(), 5); + assert_eq!(DataColumnCustodyInfo::default().ssz_bytes_len(), 5); } fn check_op_pool(store: &Store) { @@ -143,6 +156,15 @@ fn check_custody_context(store: &Store, spec: &ChainSpec) { } } +fn check_custody_info(store: &Store, spec: &ChainSpec) { + let data_column_custody_info = store.get_data_column_custody_info().unwrap(); + if spec.is_peer_das_scheduled() { + assert_eq!(data_column_custody_info.unwrap().as_ssz_bytes().len(), 13); + } else { + assert!(data_column_custody_info.is_none()); + } +} + fn check_persisted_chain(store: &Store) { let chain = store .get_item::(&Hash256::ZERO) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index e9b19ee6e0..691ec00317 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3157,7 +3157,11 @@ async fn schema_downgrade_to_min_version( ) .await; - let min_version = SchemaVersion(22); + let min_version = if spec.is_fulu_scheduled() { + SchemaVersion(27) + } else { + SchemaVersion(22) + }; // Save the slot clock so that the new harness doesn't revert in time. 
let slot_clock = harness.chain.slot_clock.clone(); diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index cacdd4a44c..83422090ca 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3760,7 +3760,6 @@ pub fn serve( .to_string(), )); } - Ok(()) }) }, @@ -3845,6 +3844,12 @@ pub fn serve( current_slot, &chain.spec, ) { + chain.update_data_column_custody_info(Some( + cgc_change + .effective_epoch + .start_slot(T::EthSpec::slots_per_epoch()), + )); + network_tx.send(NetworkMessage::CustodyCountChanged { new_custody_group_count: cgc_change.new_custody_group_count, sampling_count: cgc_change.sampling_count, diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index be0d7c063b..6c2ada447d 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -29,8 +29,22 @@ pub(crate) fn status_message(beacon_chain: &BeaconChain) finalized_checkpoint.root = Hash256::zero(); } - let earliest_available_slot = beacon_chain.store.get_anchor_info().oldest_block_slot; + // NOTE: We are making an assumption that `get_data_column_custody_info` wont fail. + let earliest_available_data_column_slot = beacon_chain + .store + .get_data_column_custody_info() + .ok() + .flatten() + .and_then(|info| info.earliest_data_column_slot); + // If data_column_custody_info.earliest_data_column_slot is `None`, + // no recent cgc changes have occurred and no cgc backfill is in progress. 
+ let earliest_available_slot = + if let Some(earliest_available_data_column_slot) = earliest_available_data_column_slot { + earliest_available_data_column_slot + } else { + beacon_chain.store.get_anchor_info().oldest_block_slot + }; StatusMessage::V2(StatusMessageV2 { fork_digest, finalized_root: finalized_checkpoint.root, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 0c230494b8..9c9374e7fe 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -6,10 +6,10 @@ use crate::historic_state_cache::HistoricStateCache; use crate::iter::{BlockRootsIterator, ParentRootBlockIterator, RootsIterator}; use crate::memory_store::MemoryStore; use crate::metadata::{ - AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnInfo, SchemaVersion, ANCHOR_INFO_KEY, - ANCHOR_UNINITIALIZED, BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, - CURRENT_SCHEMA_VERSION, DATA_COLUMN_INFO_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, - STATE_UPPER_LIMIT_NO_RETAIN, + AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnCustodyInfo, DataColumnInfo, + SchemaVersion, ANCHOR_INFO_KEY, ANCHOR_UNINITIALIZED, BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, + CONFIG_KEY, CURRENT_SCHEMA_VERSION, DATA_COLUMN_CUSTODY_INFO_KEY, DATA_COLUMN_INFO_KEY, + SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ @@ -91,6 +91,7 @@ struct BlockCache { block_cache: LruCache>, blob_cache: LruCache>, data_column_cache: LruCache>>>, + data_column_custody_info_cache: Option, } impl BlockCache { @@ -99,6 +100,7 @@ impl BlockCache { block_cache: LruCache::new(size), blob_cache: LruCache::new(size), data_column_cache: LruCache::new(size), + data_column_custody_info_cache: None, } } pub fn put_block(&mut self, block_root: Hash256, block: SignedBeaconBlock) { @@ -112,6 +114,12 @@ impl BlockCache { .get_or_insert_mut(block_root, Default::default) 
.insert(data_column.index, data_column); } + pub fn put_data_column_custody_info( + &mut self, + data_column_custody_info: Option, + ) { + self.data_column_custody_info_cache = data_column_custody_info; + } pub fn get_block<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a SignedBeaconBlock> { self.block_cache.get(block_root) } @@ -129,6 +137,9 @@ impl BlockCache { .get(block_root) .and_then(|map| map.get(column_index).cloned()) } + pub fn get_data_column_custody_info(&self) -> Option { + self.data_column_custody_info_cache.clone() + } pub fn delete_block(&mut self, block_root: &Hash256) { let _ = self.block_cache.pop(block_root); } @@ -922,6 +933,24 @@ impl, Cold: ItemStore> HotColdDB )); } + pub fn put_data_column_custody_info( + &self, + earliest_data_column_slot: Option, + ) -> Result<(), Error> { + let data_column_custody_info = DataColumnCustodyInfo { + earliest_data_column_slot, + }; + + self.blobs_db + .put(&DATA_COLUMN_CUSTODY_INFO_KEY, &data_column_custody_info)?; + + self.block_cache + .lock() + .put_data_column_custody_info(Some(data_column_custody_info)); + + Ok(()) + } + pub fn put_data_columns( &self, block_root: &Hash256, @@ -2389,6 +2418,27 @@ impl, Cold: ItemStore> HotColdDB }) } + /// Fetch custody info from the cache. + /// If custody info doesn't exist in the cache, + /// try to fetch from the DB and prime the cache. + pub fn get_data_column_custody_info(&self) -> Result, Error> { + let Some(data_column_custody_info) = self.block_cache.lock().get_data_column_custody_info() + else { + let data_column_custody_info = self + .blobs_db + .get::(&DATA_COLUMN_CUSTODY_INFO_KEY)?; + + // Update the cache + self.block_cache + .lock() + .put_data_column_custody_info(data_column_custody_info.clone()); + + return Ok(data_column_custody_info); + }; + + Ok(Some(data_column_custody_info)) + } + /// Fetch all columns for a given block from the store. 
pub fn get_data_columns( &self, diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index e996b47b72..a3d4e4a8ce 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -264,6 +264,8 @@ pub enum DBColumn { BeaconBlob, #[strum(serialize = "bdc")] BeaconDataColumn, + #[strum(serialize = "bdi")] + BeaconDataColumnCustodyInfo, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). /// /// DEPRECATED. @@ -424,6 +426,7 @@ impl DBColumn { | Self::CustodyContext | Self::OptimisticTransitionBlock => 32, Self::BeaconBlockRoots + | Self::BeaconDataColumnCustodyInfo | Self::BeaconBlockRootsChunked | Self::BeaconStateRoots | Self::BeaconStateRootsChunked diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 39a46451fc..b6091087ef 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(26); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(27); // All the keys that get stored under the `BeaconMeta` column. // @@ -18,6 +18,7 @@ pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4); pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6); pub const DATA_COLUMN_INFO_KEY: Hash256 = Hash256::repeat_byte(7); +pub const DATA_COLUMN_CUSTODY_INFO_KEY: Hash256 = Hash256::repeat_byte(8); /// State upper limit value used to indicate that a node is not storing historic states. pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); @@ -204,6 +205,30 @@ impl StoreItem for BlobInfo { } } +/// Database parameter relevant to data column custody sync. There is only at most a single +/// `DataColumnCustodyInfo` stored in the db. 
`earliest_data_column_slot` is updated when cgc +/// count changes and is updated incrementally during data column custody backfill. Once custody backfill +/// is complete `earliest_data_column_slot` is set to `None`. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] +pub struct DataColumnCustodyInfo { + /// The earliest slot for which data columns are available. + pub earliest_data_column_slot: Option, +} + +impl StoreItem for DataColumnCustodyInfo { + fn db_column() -> DBColumn { + DBColumn::BeaconDataColumnCustodyInfo + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(DataColumnCustodyInfo::from_ssz_bytes(bytes)?) + } +} + /// Database parameters relevant to data column sync. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] pub struct DataColumnInfo { From e6089fe7db7809cbf391c2f50b69047655e47a61 Mon Sep 17 00:00:00 2001 From: Mac L Date: Wed, 23 Jul 2025 00:22:03 +1000 Subject: [PATCH 40/44] Control span data through tracing Extensions (#7239) #7234 Removes the `Arc` which was used to store and manage span data and replaces it with the inbuilt `Extension` for managing span-specific data. This also avoids an `unwrap` which was used when acquiring the lock over the mutex'd span data. 
--- common/logging/src/tracing_logging_layer.rs | 380 ++++++++++++-------- 1 file changed, 232 insertions(+), 148 deletions(-) diff --git a/common/logging/src/tracing_logging_layer.rs b/common/logging/src/tracing_logging_layer.rs index c3784a8f62..43feb3c86d 100644 --- a/common/logging/src/tracing_logging_layer.rs +++ b/common/logging/src/tracing_logging_layer.rs @@ -2,9 +2,7 @@ use crate::utils::is_ascii_control; use chrono::prelude::*; use serde_json::{Map, Value}; -use std::collections::HashMap; use std::io::Write; -use std::sync::{Arc, Mutex}; use tracing::field::Field; use tracing::span::Id; use tracing::Subscriber; @@ -23,7 +21,6 @@ pub struct LoggingLayer { pub log_color: bool, pub log_format: Option, pub extra_info: bool, - span_fields: Arc>>, } impl LoggingLayer { @@ -43,7 +40,6 @@ impl LoggingLayer { log_color, log_format, extra_info, - span_fields: Arc::new(Mutex::new(HashMap::new())), } } } @@ -52,23 +48,20 @@ impl Layer for LoggingLayer where S: Subscriber + for<'a> LookupSpan<'a>, { - fn on_new_span(&self, attrs: &tracing::span::Attributes<'_>, id: &Id, _ctx: Context) { - let metadata = attrs.metadata(); - let span_name = metadata.name(); - - let mut visitor = SpanFieldsExtractor::default(); + fn on_new_span(&self, attrs: &tracing::span::Attributes<'_>, id: &Id, ctx: Context) { + let mut visitor = FieldVisitor::new(); attrs.record(&mut visitor); - let span_data = SpanData { - name: span_name.to_string(), - fields: visitor.fields, - }; + if let Some(span) = ctx.span(id) { + let mut extensions = span.extensions_mut(); - let mut span_fields = match self.span_fields.lock() { - Ok(guard) => guard, - Err(poisoned) => poisoned.into_inner(), - }; - span_fields.insert(id.clone(), span_data); + let span_data = SpanData { + name: attrs.metadata().name().to_string(), + fields: visitor.fields, + }; + + extensions.replace(span_data); + } } fn on_event(&self, event: &tracing::Event<'_>, ctx: Context) { @@ -82,13 +75,19 @@ where let mut writer = 
self.non_blocking_writer.clone(); - let mut visitor = LogMessageExtractor { - message: String::new(), - fields: Vec::new(), - is_crit: false, - }; + let mut visitor = FieldVisitor::new(); + event.record(&mut visitor); + let mut span_data = Vec::new(); + if let Some(scope) = ctx.event_scope(event) { + for span in scope.from_root() { + if let Some(data) = span.extensions().get::() { + span_data.extend(data.fields.clone()); + } + } + } + // Remove ascii control codes from message. // All following formatting and logs components are predetermined or known. if visitor.message.as_bytes().iter().any(u8::is_ascii_control) { @@ -145,23 +144,13 @@ where }; if self.log_format.as_deref() == Some("JSON") { - build_log_json( - &visitor, - plain_level_str, - meta, - &ctx, - &self.span_fields, - event, - &mut writer, - ); + build_log_json(&visitor, plain_level_str, meta, &span_data, &mut writer); } else { build_log_text( &visitor, plain_level_str, ×tamp, - &ctx, - &self.span_fields, - event, + &span_data, &location, color_level_str, self.log_color, @@ -171,79 +160,65 @@ where } } -struct SpanData { - name: String, - fields: Vec<(String, String)>, +#[derive(Clone, Debug)] +pub struct SpanData { + pub name: String, + pub fields: Vec<(String, String)>, } -#[derive(Default)] -struct SpanFieldsExtractor { - fields: Vec<(String, String)>, -} - -impl tracing_core::field::Visit for SpanFieldsExtractor { - fn record_str(&mut self, field: &Field, value: &str) { - self.fields - .push((field.name().to_string(), format!("\"{}\"", value))); - } - - fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { - self.fields - .push((field.name().to_string(), format!("{:?}", value))); - } - - fn record_i64(&mut self, field: &Field, value: i64) { - self.fields - .push((field.name().to_string(), value.to_string())); - } - - fn record_u64(&mut self, field: &Field, value: u64) { - self.fields - .push((field.name().to_string(), value.to_string())); - } - - fn record_bool(&mut self, field: 
&Field, value: bool) { - self.fields - .push((field.name().to_string(), value.to_string())); - } -} - -struct LogMessageExtractor { +struct FieldVisitor { message: String, fields: Vec<(String, String)>, is_crit: bool, } -impl tracing_core::field::Visit for LogMessageExtractor { +impl FieldVisitor { + fn new() -> Self { + FieldVisitor { + message: String::new(), + fields: Vec::new(), + is_crit: false, + } + } +} + +impl tracing_core::field::Visit for FieldVisitor { fn record_str(&mut self, field: &Field, value: &str) { - if field.name() == "message" { - if self.message.is_empty() { - self.message = value.to_string(); - } else { - self.fields - .push(("msg_id".to_string(), format!("\"{}\"", value))); + match field.name() { + "message" => { + if self.message.is_empty() { + self.message = value.to_string(); + } else { + self.fields + .push(("msg_id".to_string(), format!("\"{}\"", value))); + } + } + "error_type" if value == "crit" => { + self.is_crit = true; + } + _ => { + self.fields + .push((field.name().to_string(), format!("\"{}\"", value))); } - } else if field.name() == "error_type" && value == "crit" { - self.is_crit = true; - } else { - self.fields - .push((field.name().to_string(), format!("\"{}\"", value))); } } fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { - if field.name() == "message" { - if self.message.is_empty() { - self.message = format!("{:?}", value); - } else { - self.fields - .push(("msg_id".to_string(), format!("{:?}", value))); + let string_value = format!("{:?}", value); + match field.name() { + "message" => { + if self.message.is_empty() { + self.message = string_value; + } else { + self.fields.push(("msg_id".to_string(), string_value)); + } + } + "error_type" if string_value == "\"crit\"" => { + self.is_crit = true; + } + _ => { + self.fields.push((field.name().to_string(), string_value)); } - } else if field.name() == "error_type" && format!("{:?}", value) == "\"crit\"" { - self.is_crit = true; - } else { - 
self.fields - .push((field.name().to_string(), format!("{:?}", value))); } } @@ -263,17 +238,13 @@ impl tracing_core::field::Visit for LogMessageExtractor { } } -fn build_log_json<'a, S>( - visitor: &LogMessageExtractor, +fn build_log_json( + visitor: &FieldVisitor, plain_level_str: &str, meta: &tracing::Metadata<'_>, - ctx: &Context<'_, S>, - span_fields: &Arc>>, - event: &tracing::Event<'_>, + span_fields: &[(String, String)], writer: &mut impl Write, -) where - S: Subscriber + for<'lookup> LookupSpan<'lookup>, -{ +) { let utc_timestamp = Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Micros, true); let mut log_map = Map::new(); @@ -302,19 +273,9 @@ fn build_log_json<'a, S>( log_map.insert(key, parsed_val); } - if let Some(scope) = ctx.event_scope(event) { - let guard = span_fields.lock().ok(); - if let Some(span_map) = guard { - for span in scope { - let id = span.id(); - if let Some(span_data) = span_map.get(&id) { - for (key, val) in &span_data.fields { - let parsed_span_val = parse_field(val); - log_map.insert(key.clone(), parsed_span_val); - } - } - } - } + for (key, val) in span_fields { + let parsed_span_val = parse_field(val); + log_map.insert(key.clone(), parsed_span_val); } let json_obj = Value::Object(log_map); @@ -326,48 +287,33 @@ fn build_log_json<'a, S>( } #[allow(clippy::too_many_arguments)] -fn build_log_text<'a, S>( - visitor: &LogMessageExtractor, +fn build_log_text( + visitor: &FieldVisitor, plain_level_str: &str, timestamp: &str, - ctx: &Context<'_, S>, - span_fields: &Arc>>, - event: &tracing::Event<'_>, + span_fields: &[(String, String)], location: &str, color_level_str: &str, use_color: bool, writer: &mut impl Write, -) where - S: Subscriber + for<'lookup> LookupSpan<'lookup>, -{ +) { let bold_start = "\x1b[1m"; let bold_end = "\x1b[0m"; - let mut collected_span_fields = Vec::new(); - - if let Some(scope) = ctx.event_scope(event) { - for span in scope { - let id = span.id(); - let span_fields_map = span_fields.lock().unwrap(); - if let 
Some(span_data) = span_fields_map.get(&id) { - collected_span_fields.push((span_data.name.clone(), span_data.fields.clone())); - } - } - } let mut formatted_spans = String::new(); - for (_, fields) in collected_span_fields.iter().rev() { - for (i, (field_name, field_value)) in fields.iter().enumerate() { - if i > 0 && !visitor.fields.is_empty() { - formatted_spans.push_str(", "); - } - if use_color { - formatted_spans.push_str(&format!( - "{}{}{}: {}", - bold_start, field_name, bold_end, field_value - )); - } else { - formatted_spans.push_str(&format!("{}: {}", field_name, field_value)); - } + for (i, (field_name, field_value)) in span_fields.iter().rev().enumerate() { + if use_color { + formatted_spans.push_str(&format!( + "{}{}{}: {}", + bold_start, field_name, bold_end, field_value + )); + } else { + formatted_spans.push_str(&format!("{}: {}", field_name, field_value)); + } + + // Check if this is not the last span. + if i != span_fields.len() - 1 { + formatted_spans.push_str(", "); } } @@ -419,7 +365,8 @@ fn build_log_text<'a, S>( } else { formatted_fields.push_str(&format!("{}: {}", field_name, field_value)); } - if i == visitor.fields.len() - 1 && !collected_span_fields.is_empty() { + // Check if this is the last field and that we are also adding spans. 
+ if i == visitor.fields.len() - 1 && !span_fields.is_empty() { formatted_fields.push(','); } } @@ -455,3 +402,140 @@ fn parse_field(val: &str) -> Value { }; serde_json::from_str(cleaned).unwrap_or(Value::String(cleaned.to_string())) } + +#[cfg(test)] +mod tests { + use crate::tracing_logging_layer::{build_log_text, FieldVisitor}; + use std::io::Write; + + struct Buffer { + data: Vec, + } + + impl Buffer { + fn new() -> Self { + Buffer { data: Vec::new() } + } + + fn into_string(self) -> String { + String::from_utf8(self.data).unwrap() + } + } + + impl Write for Buffer { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + self.data.extend_from_slice(buf); + Ok(buf.len()) + } + + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } + } + + #[test] + fn test_build_log_text_single_log_field() { + let log_fields = vec![("field_name".to_string(), "field_value".to_string())]; + let span_fields = vec![]; + let expected = "Jan 1 08:00:00.000 INFO test message field_name: field_value \n"; + test_build_log_text(log_fields, span_fields, expected); + } + + #[test] + fn test_build_log_text_multiple_log_fields() { + let log_fields = vec![ + ("field_name1".to_string(), "field_value1".to_string()), + ("field_name2".to_string(), "field_value2".to_string()), + ]; + let span_fields = vec![]; + let expected = "Jan 1 08:00:00.000 INFO test message field_name1: field_value1, field_name2: field_value2 \n"; + test_build_log_text(log_fields, span_fields, expected); + } + + #[test] + fn test_build_log_text_log_field_and_span() { + let log_fields = vec![("field_name".to_string(), "field_value".to_string())]; + let span_fields = vec![( + "span_field_name".to_string(), + "span_field_value".to_string(), + )]; + let expected = "Jan 1 08:00:00.000 INFO test message field_name: field_value, span_field_name: span_field_value\n"; + test_build_log_text(log_fields, span_fields, expected); + } + + #[test] + fn test_build_log_text_single_span() { + let log_fields = vec![]; + let span_fields = 
vec![( + "span_field_name".to_string(), + "span_field_value".to_string(), + )]; + let expected = "Jan 1 08:00:00.000 INFO test message span_field_name: span_field_value\n"; + test_build_log_text(log_fields, span_fields, expected); + } + + #[test] + fn test_build_log_text_multiple_spans() { + let log_fields = vec![]; + let span_fields = vec![ + ( + "span_field_name1".to_string(), + "span_field_value1".to_string(), + ), + ( + "span_field_name2".to_string(), + "span_field_value2".to_string(), + ), + ]; + let expected = "Jan 1 08:00:00.000 INFO test message span_field_name2: span_field_value2, span_field_name1: span_field_value1\n"; + test_build_log_text(log_fields, span_fields, expected); + } + + #[test] + fn test_build_log_text_multiple_span_fields() { + let log_fields = vec![]; + let span_fields = vec![ + ( + "span_field_name1-1".to_string(), + "span_field_value1-1".to_string(), + ), + ( + "span_field_name1-2".to_string(), + "span_field_value1-2".to_string(), + ), + ]; + let expected = "Jan 1 08:00:00.000 INFO test message span_field_name1-2: span_field_value1-2, span_field_name1-1: span_field_value1-1\n"; + test_build_log_text(log_fields, span_fields, expected); + } + + fn test_build_log_text( + log_fields: Vec<(String, String)>, + span_fields: Vec<(String, String)>, + expected: &str, + ) { + let visitor = FieldVisitor { + message: "test message".to_string(), + fields: log_fields, + is_crit: false, + }; + let plain_level_str = "INFO"; + let timestamp = "Jan 1 08:00:00.000"; + let location = ""; + let color_level_str = "\x1b[32mINFO\x1b[0m"; + let use_color = false; + let mut writer = Buffer::new(); + + build_log_text( + &visitor, + plain_level_str, + timestamp, + &span_fields, + location, + color_level_str, + use_color, + &mut writer, + ); + + assert_eq!(expected, &writer.into_string()); + } +} From ce99e0c383f28f0859b58e164a604b74831a1e42 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 23 Jul 2025 10:29:18 +1000 Subject: [PATCH 41/44] Refine delayed head 
block logging (#7705) Small tweak to `Delayed head block` logging to make it more representative of actual issues. Previously we used the total import delay to determine whether a block was late, but this includes the time taken for IO (and now hdiff computation) which happens _after_ the block is made attestable. This PR changes the logic to use the attestable delay (where possible) falling back to the previous value if the block doesn't have one; e.g. if it didn't meet the conditions to make it into the attestable cache. --- .../beacon_chain/src/canonical_head.rs | 62 +++++++++---------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index f96b59aec4..f80a2d4089 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -38,7 +38,7 @@ use crate::{ block_times_cache::BlockTimesCache, events::ServerSentEventHandler, metrics, - validator_monitor::{get_slot_delay_ms, timestamp_now}, + validator_monitor::get_slot_delay_ms, BeaconChain, BeaconChainError as Error, BeaconChainTypes, BeaconSnapshot, }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; @@ -1287,7 +1287,10 @@ fn observe_head_block_delays( slot_clock: &S, event_handler: Option<&ServerSentEventHandler>, ) { - let block_time_set_as_head = timestamp_now(); + let Some(block_time_set_as_head) = slot_clock.now_duration() else { + // Practically unreachable: the slot clock's time should not be before the UNIX epoch. + return; + }; let head_block_root = head_block.root; let head_block_slot = head_block.slot; let head_block_is_optimistic = head_block.execution_status.is_optimistic_or_invalid(); @@ -1308,10 +1311,6 @@ fn observe_head_block_delays( // If a block comes in from over 4 slots ago, it is most likely a block from sync. 
let block_from_sync = block_delay_total > slot_clock.slot_duration() * 4; - // Determine whether the block has been set as head too late for proper attestation - // production. - let late_head = block_delay_total >= slot_clock.unagg_attestation_production_delay(); - // Do not store metrics if the block was > 4 slots old, this helps prevent noise during // sync. if !block_from_sync { @@ -1410,6 +1409,14 @@ fn observe_head_block_delays( .as_millis() as i64, ); + // Consider the block late if the time it became attestable is after the attestation + // deadline. If the block was not made attestable, use the set-as-head time. + let attestable_delay = block_delays.attestable.unwrap_or(block_delay_total); + + // Determine whether the block has been set as head too late for proper attestation + // production. + let late_head = attestable_delay >= slot_clock.unagg_attestation_production_delay(); + // If the block was enshrined as head too late for attestations to be created for it, // log a debug warning and increment a metric. 
let format_delay = |delay: &Option| { @@ -1432,6 +1439,24 @@ fn observe_head_block_delays( set_as_head_time_ms = format_delay(&block_delays.set_as_head), "Delayed head block" ); + if let Some(event_handler) = event_handler { + if event_handler.has_late_head_subscribers() { + let peer_info = block_times_cache.get_peer_info(head_block_root); + event_handler.register(EventKind::LateHead(SseLateHead { + slot: head_block_slot, + block: head_block_root, + peer_id: peer_info.id, + peer_client: peer_info.client, + proposer_index: head_block_proposer_index, + proposer_graffiti: head_block_graffiti, + block_delay: block_delay_total, + observed_delay: block_delays.observed, + imported_delay: block_delays.imported, + set_as_head_delay: block_delays.set_as_head, + execution_optimistic: head_block_is_optimistic, + })); + } + } } else { debug!( block_root = ?head_block_root, @@ -1450,29 +1475,4 @@ fn observe_head_block_delays( ); } } - - if let Some(event_handler) = event_handler { - if !block_from_sync && late_head && event_handler.has_late_head_subscribers() { - let peer_info = block_times_cache.get_peer_info(head_block_root); - let block_delays = block_times_cache.get_block_delays( - head_block_root, - slot_clock - .start_of(head_block_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - event_handler.register(EventKind::LateHead(SseLateHead { - slot: head_block_slot, - block: head_block_root, - peer_id: peer_info.id, - peer_client: peer_info.client, - proposer_index: head_block_proposer_index, - proposer_graffiti: head_block_graffiti, - block_delay: block_delay_total, - observed_delay: block_delays.observed, - imported_delay: block_delays.imported, - set_as_head_delay: block_delays.set_as_head, - execution_optimistic: head_block_is_optimistic, - })); - } - } } From c4b973f5bafcf341c7a5847caf570fc8eee6a965 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Wed, 23 Jul 2025 08:29:21 +0800 Subject: [PATCH 42/44] Use SSZ by default 
when calling /eth/v3/validator/blocks (#7727) * #7698 --- .../validator_services/src/block_service.rs | 40 ++++++++++++++----- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/validator_client/validator_services/src/block_service.rs b/validator_client/validator_services/src/block_service.rs index 01f786e160..4ff8b15bed 100644 --- a/validator_client/validator_services/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -524,22 +524,44 @@ impl BlockService { proposer_index: Option, builder_boost_factor: Option, ) -> Result, BlockError> { - let (block_response, _) = beacon_node - .get_validator_blocks_v3::( + let block_response = match beacon_node + .get_validator_blocks_v3_ssz::( slot, randao_reveal_ref, graffiti.as_ref(), builder_boost_factor, ) .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })?; + { + Ok((ssz_block_response, _)) => ssz_block_response, + Err(e) => { + warn!( + slot = slot.as_u64(), + error = %e, + "Beacon node does not support SSZ in block production, falling back to JSON" + ); - let (block_proposer, unsigned_block) = match block_response.data { + let (json_block_response, _) = beacon_node + .get_validator_blocks_v3::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })?; + + // Extract ProduceBlockV3Response (data field of the struct ForkVersionedResponse) + json_block_response.data + } + }; + + let (block_proposer, unsigned_block) = match block_response { eth2::types::ProduceBlockV3Response::Full(block) => { (block.block().proposer_index(), UnsignedBlock::Full(block)) } From 4daa01597155fea4a06bb9a39fdd02ea56b9b291 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 23 Jul 2025 13:24:45 +1000 Subject: [PATCH 43/44] Remove peer sampling code (#7768) Peer sampling has 
been completely removed from the spec. This PR removes our partial implementation from the codebase. https://github.com/ethereum/consensus-specs/pull/4393 --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 - beacon_node/beacon_chain/src/chain_config.rs | 3 - beacon_node/beacon_processor/src/lib.rs | 54 +- beacon_node/beacon_processor/src/metrics.rs | 9 - .../src/scheduler/work_reprocessing_queue.rs | 97 --- .../src/service/api_types.rs | 48 +- beacon_node/network/src/metrics.rs | 35 - .../gossip_methods.rs | 15 - .../src/network_beacon_processor/mod.rs | 38 - .../network_beacon_processor/sync_methods.rs | 43 +- beacon_node/network/src/sync/manager.rs | 87 +-- beacon_node/network/src/sync/mod.rs | 2 - beacon_node/network/src/sync/peer_sampling.rs | 735 ------------------ beacon_node/network/src/sync/tests/lookups.rs | 309 +------- beacon_node/src/cli.rs | 9 - beacon_node/src/config.rs | 4 - lighthouse/tests/beacon_node.rs | 13 - 17 files changed, 11 insertions(+), 1509 deletions(-) delete mode 100644 beacon_node/network/src/sync/peer_sampling.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6397960682..602de6e25c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2959,16 +2959,6 @@ impl BeaconChain { ChainSegmentResult::Successful { imported_blocks } } - /// Updates fork-choice node into a permanent `available` state so it can become a viable head. - /// Only completed sampling results are received. Blocks are unavailable by default and should - /// be pruned on finalization, on a timeout or by a max count. - pub async fn process_sampling_completed(self: &Arc, block_root: Hash256) { - // TODO(das): update fork-choice, act on sampling result, adjust log level - // NOTE: It is possible that sampling complets before block is imported into fork choice, - // in that case we may need to update availability cache. 
- info!(%block_root, "Sampling completed"); - } - /// Returns `Ok(GossipVerifiedBlock)` if the supplied `block` should be forwarded onto the /// gossip network. The block is not imported into the chain, it is just partially verified. /// @@ -7043,15 +7033,6 @@ impl BeaconChain { && self.spec.is_peer_das_enabled_for_epoch(block_epoch) } - /// Returns true if we should issue a sampling request for this block - /// TODO(das): check if the block is still within the da_window - pub fn should_sample_slot(&self, slot: Slot) -> bool { - self.config.enable_sampling - && self - .spec - .is_peer_das_enabled_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())) - } - /// Gets the `LightClientBootstrap` object for a requested block root. /// /// Returns `None` when the state or block is not found in the database. diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 808c96d965..08f17c6c6b 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -96,8 +96,6 @@ pub struct ChainConfig { pub enable_light_client_server: bool, /// The number of data columns to withhold / exclude from publishing when proposing a block. pub malicious_withhold_count: usize, - /// Enable peer sampling on blocks. - pub enable_sampling: bool, /// Number of batches that the node splits blobs or data columns into during publication. /// This doesn't apply if the node is the block proposer. For PeerDAS only. 
pub blob_publication_batches: usize, @@ -148,7 +146,6 @@ impl Default for ChainConfig { epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, enable_light_client_server: true, malicious_withhold_count: 0, - enable_sampling: false, blob_publication_batches: 4, blob_publication_batch_interval: Duration::from_millis(300), sync_tolerance_epochs: DEFAULT_SYNC_TOLERANCE_EPOCHS, diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 0f324071a1..ae785e5127 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -67,11 +67,11 @@ use types::{ BeaconState, ChainSpec, EthSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, }; +use work_reprocessing_queue::IgnoredRpcBlock; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, }; -use work_reprocessing_queue::{IgnoredRpcBlock, QueuedSamplingRequest}; mod metrics; pub mod scheduler; @@ -112,12 +112,9 @@ pub struct BeaconProcessorQueueLengths { gossip_proposer_slashing_queue: usize, gossip_attester_slashing_queue: usize, unknown_light_client_update_queue: usize, - unknown_block_sampling_request_queue: usize, rpc_block_queue: usize, rpc_blob_queue: usize, rpc_custody_column_queue: usize, - rpc_verify_data_column_queue: usize, - sampling_result_queue: usize, column_reconstruction_queue: usize, chain_segment_queue: usize, backfill_chain_segment: usize, @@ -183,9 +180,6 @@ impl BeaconProcessorQueueLengths { rpc_blob_queue: 1024, // TODO(das): Placeholder values rpc_custody_column_queue: 1000, - rpc_verify_data_column_queue: 1000, - unknown_block_sampling_request_queue: 16384, - sampling_result_queue: 1000, column_reconstruction_queue: 64, chain_segment_queue: 64, backfill_chain_segment: 64, @@ -487,10 +481,6 @@ impl From for WorkEvent { process_fn, }, }, - ReadyWork::SamplingRequest(QueuedSamplingRequest 
{ process_fn, .. }) => Self { - drop_during_sync: true, - work: Work::UnknownBlockSamplingRequest { process_fn }, - }, ReadyWork::BackfillSync(QueuedBackfillBatch(process_fn)) => Self { drop_during_sync: false, work: Work::ChainSegmentBackfill(process_fn), @@ -582,9 +572,6 @@ pub enum Work { parent_root: Hash256, process_fn: BlockingFn, }, - UnknownBlockSamplingRequest { - process_fn: BlockingFn, - }, GossipAggregateBatch { aggregates: Vec>, process_batch: Box>) + Send + Sync>, @@ -611,8 +598,6 @@ pub enum Work { process_fn: AsyncFn, }, RpcCustodyColumn(AsyncFn), - RpcVerifyDataColumn(AsyncFn), - SamplingResult(AsyncFn), ColumnReconstruction(AsyncFn), IgnoredRpcBlock { process_fn: BlockingFn, @@ -652,7 +637,6 @@ pub enum WorkType { GossipAggregate, UnknownBlockAggregate, UnknownLightClientOptimisticUpdate, - UnknownBlockSamplingRequest, GossipAggregateBatch, GossipBlock, GossipBlobSidecar, @@ -668,8 +652,6 @@ pub enum WorkType { RpcBlock, RpcBlobs, RpcCustodyColumn, - RpcVerifyDataColumn, - SamplingResult, ColumnReconstruction, IgnoredRpcBlock, ChainSegment, @@ -720,8 +702,6 @@ impl Work { Work::RpcBlock { .. } => WorkType::RpcBlock, Work::RpcBlobs { .. } => WorkType::RpcBlobs, Work::RpcCustodyColumn { .. } => WorkType::RpcCustodyColumn, - Work::RpcVerifyDataColumn { .. } => WorkType::RpcVerifyDataColumn, - Work::SamplingResult { .. } => WorkType::SamplingResult, Work::ColumnReconstruction(_) => WorkType::ColumnReconstruction, Work::IgnoredRpcBlock { .. } => WorkType::IgnoredRpcBlock, Work::ChainSegment { .. } => WorkType::ChainSegment, @@ -741,7 +721,6 @@ impl Work { Work::LightClientUpdatesByRangeRequest(_) => WorkType::LightClientUpdatesByRangeRequest, Work::UnknownBlockAttestation { .. } => WorkType::UnknownBlockAttestation, Work::UnknownBlockAggregate { .. } => WorkType::UnknownBlockAggregate, - Work::UnknownBlockSamplingRequest { .. } => WorkType::UnknownBlockSamplingRequest, Work::UnknownLightClientOptimisticUpdate { .. 
} => { WorkType::UnknownLightClientOptimisticUpdate } @@ -884,14 +863,8 @@ impl BeaconProcessor { let mut rpc_block_queue = FifoQueue::new(queue_lengths.rpc_block_queue); let mut rpc_blob_queue = FifoQueue::new(queue_lengths.rpc_blob_queue); let mut rpc_custody_column_queue = FifoQueue::new(queue_lengths.rpc_custody_column_queue); - let mut rpc_verify_data_column_queue = - FifoQueue::new(queue_lengths.rpc_verify_data_column_queue); - // TODO(das): the sampling_request_queue is never read - let mut sampling_result_queue = FifoQueue::new(queue_lengths.sampling_result_queue); let mut column_reconstruction_queue = FifoQueue::new(queue_lengths.column_reconstruction_queue); - let mut unknown_block_sampling_request_queue = - FifoQueue::new(queue_lengths.unknown_block_sampling_request_queue); let mut chain_segment_queue = FifoQueue::new(queue_lengths.chain_segment_queue); let mut backfill_chain_segment = FifoQueue::new(queue_lengths.backfill_chain_segment); let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); @@ -1058,13 +1031,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = rpc_custody_column_queue.pop() { Some(item) - // TODO(das): decide proper prioritization for sampling columns } else if let Some(item) = rpc_custody_column_queue.pop() { Some(item) - } else if let Some(item) = rpc_verify_data_column_queue.pop() { - Some(item) - } else if let Some(item) = sampling_result_queue.pop() { - Some(item) // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. } else if let Some(item) = delayed_block_queue.pop() { @@ -1224,9 +1192,6 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = dcbrange_queue.pop() { Some(item) - // Prioritize sampling requests after block syncing requests - } else if let Some(item) = unknown_block_sampling_request_queue.pop() { - Some(item) // Check slashings after all other consensus messages so we prioritize // following head. 
// @@ -1379,10 +1344,6 @@ impl BeaconProcessor { Work::RpcCustodyColumn { .. } => { rpc_custody_column_queue.push(work, work_id) } - Work::RpcVerifyDataColumn(_) => { - rpc_verify_data_column_queue.push(work, work_id) - } - Work::SamplingResult(_) => sampling_result_queue.push(work, work_id), Work::ColumnReconstruction(_) => { column_reconstruction_queue.push(work, work_id) } @@ -1425,9 +1386,6 @@ impl BeaconProcessor { Work::UnknownLightClientOptimisticUpdate { .. } => { unknown_light_client_update_queue.push(work, work_id) } - Work::UnknownBlockSamplingRequest { .. } => { - unknown_block_sampling_request_queue.push(work, work_id) - } Work::ApiRequestP0 { .. } => api_request_p0_queue.push(work, work_id), Work::ApiRequestP1 { .. } => api_request_p1_queue.push(work, work_id), }; @@ -1451,9 +1409,6 @@ impl BeaconProcessor { WorkType::UnknownLightClientOptimisticUpdate => { unknown_light_client_update_queue.len() } - WorkType::UnknownBlockSamplingRequest => { - unknown_block_sampling_request_queue.len() - } WorkType::GossipAggregateBatch => 0, // No queue WorkType::GossipBlock => gossip_block_queue.len(), WorkType::GossipBlobSidecar => gossip_blob_queue.len(), @@ -1473,8 +1428,6 @@ impl BeaconProcessor { WorkType::RpcBlock => rpc_block_queue.len(), WorkType::RpcBlobs | WorkType::IgnoredRpcBlock => rpc_blob_queue.len(), WorkType::RpcCustodyColumn => rpc_custody_column_queue.len(), - WorkType::RpcVerifyDataColumn => rpc_verify_data_column_queue.len(), - WorkType::SamplingResult => sampling_result_queue.len(), WorkType::ColumnReconstruction => column_reconstruction_queue.len(), WorkType::ChainSegment => chain_segment_queue.len(), WorkType::ChainSegmentBackfill => backfill_chain_segment.len(), @@ -1600,8 +1553,7 @@ impl BeaconProcessor { }), Work::UnknownBlockAttestation { process_fn } | Work::UnknownBlockAggregate { process_fn } - | Work::UnknownLightClientOptimisticUpdate { process_fn, .. 
} - | Work::UnknownBlockSamplingRequest { process_fn } => { + | Work::UnknownLightClientOptimisticUpdate { process_fn, .. } => { task_spawner.spawn_blocking(process_fn) } Work::DelayedImportBlock { @@ -1612,8 +1564,6 @@ impl BeaconProcessor { Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } | Work::RpcCustodyColumn(process_fn) - | Work::RpcVerifyDataColumn(process_fn) - | Work::SamplingResult(process_fn) | Work::ColumnReconstruction(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index fc8c712f4e..275875b1a4 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -98,15 +98,6 @@ pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: LazyLock, -> = LazyLock::new(|| { - try_create_int_counter( - "beacon_processor_reprocessing_queue_matched_sampling_requests", - "Number of queued sampling requests where a matching block has been imported.", - ) -}); /* * Light client update reprocessing queue metrics. diff --git a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs index 855342d8bd..07d540050f 100644 --- a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs @@ -69,10 +69,6 @@ const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; /// How many light client updates we keep before new ones get dropped. const MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES: usize = 128; -/// How many sampling requests we queue before new ones get dropped. -/// TODO(das): choose a sensible value -const MAXIMUM_QUEUED_SAMPLING_REQUESTS: usize = 16_384; - // Process backfill batch 50%, 60%, 80% through each slot. 
// // Note: use caution to set these fractions in a way that won't cause panic-y @@ -109,8 +105,6 @@ pub enum ReprocessQueueMessage { UnknownBlockAggregate(QueuedAggregate), /// A light client optimistic update that references a parent root that has not been seen as a parent. UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), - /// A sampling request that references an unknown block. - UnknownBlockSamplingRequest(QueuedSamplingRequest), /// A new backfill batch that needs to be scheduled for processing. BackfillSync(QueuedBackfillBatch), /// A delayed column reconstruction that needs checking @@ -125,7 +119,6 @@ pub enum ReadyWork { Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), LightClientUpdate(QueuedLightClientUpdate), - SamplingRequest(QueuedSamplingRequest), BackfillSync(QueuedBackfillBatch), ColumnReconstruction(QueuedColumnReconstruction), } @@ -151,12 +144,6 @@ pub struct QueuedLightClientUpdate { pub process_fn: BlockingFn, } -/// A sampling request for which the corresponding block is not known while processing. -pub struct QueuedSamplingRequest { - pub beacon_block_root: Hash256, - pub process_fn: BlockingFn, -} - /// A block that arrived early and has been queued for later import. pub struct QueuedGossipBlock { pub beacon_block_slot: Slot, @@ -246,8 +233,6 @@ struct ReprocessQueue { attestations_delay_queue: DelayQueue, /// Queue to manage scheduled light client updates. lc_updates_delay_queue: DelayQueue, - /// Queue to manage scheduled sampling requests - sampling_requests_delay_queue: DelayQueue, /// Queue to manage scheduled column reconstructions. column_reconstructions_delay_queue: DelayQueue, @@ -264,10 +249,6 @@ struct ReprocessQueue { queued_lc_updates: FnvHashMap, /// Light Client Updates per parent_root. awaiting_lc_updates_per_parent_root: HashMap>, - /// Queued sampling requests. - queued_sampling_requests: FnvHashMap, - /// Sampling requests per block root. 
- awaiting_sampling_requests_per_block_root: HashMap>, /// Column reconstruction per block root. queued_column_reconstructions: HashMap, /// Queued backfill batches @@ -277,18 +258,15 @@ struct ReprocessQueue { /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, next_lc_update: usize, - next_sampling_request_update: usize, early_block_debounce: TimeLatch, rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch, - sampling_request_delay_debounce: TimeLatch, next_backfill_batch_event: Option>>, slot_clock: Arc, } pub type QueuedLightClientUpdateId = usize; -pub type QueuedSamplingRequestId = usize; #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum QueuedAttestationId { @@ -436,26 +414,21 @@ impl ReprocessQueue { rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), lc_updates_delay_queue: DelayQueue::new(), - sampling_requests_delay_queue: <_>::default(), column_reconstructions_delay_queue: DelayQueue::new(), queued_gossip_block_roots: HashSet::new(), queued_lc_updates: FnvHashMap::default(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), - queued_sampling_requests: <_>::default(), awaiting_attestations_per_root: HashMap::new(), awaiting_lc_updates_per_parent_root: HashMap::new(), - awaiting_sampling_requests_per_block_root: <_>::default(), queued_backfill_batches: Vec::new(), queued_column_reconstructions: HashMap::new(), next_attestation: 0, next_lc_update: 0, - next_sampling_request_update: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), lc_update_delay_debounce: TimeLatch::default(), - sampling_request_delay_debounce: <_>::default(), next_backfill_batch_event: None, slot_clock, } @@ -664,34 +637,6 @@ impl ReprocessQueue { self.next_lc_update += 1; } - 
InboundEvent::Msg(UnknownBlockSamplingRequest(queued_sampling_request)) => { - if self.sampling_requests_delay_queue.len() >= MAXIMUM_QUEUED_SAMPLING_REQUESTS { - if self.sampling_request_delay_debounce.elapsed() { - error!( - queue_size = MAXIMUM_QUEUED_SAMPLING_REQUESTS, - "Sampling requests delay queue is full" - ); - } - // Drop the inbound message. - return; - } - - let id: QueuedSamplingRequestId = self.next_sampling_request_update; - self.next_sampling_request_update += 1; - - // Register the delay. - let delay_key = self - .sampling_requests_delay_queue - .insert(id, QUEUED_SAMPLING_REQUESTS_DELAY); - - self.awaiting_sampling_requests_per_block_root - .entry(queued_sampling_request.beacon_block_root) - .or_default() - .push(id); - - self.queued_sampling_requests - .insert(id, (queued_sampling_request, delay_key)); - } InboundEvent::Msg(BlockImported { block_root, parent_root, @@ -751,48 +696,6 @@ impl ReprocessQueue { ); } } - // Unqueue the sampling requests we have for this root, if any. - if let Some(queued_ids) = self - .awaiting_sampling_requests_per_block_root - .remove(&block_root) - { - let mut sent_count = 0; - let mut failed_to_send_count = 0; - - for id in queued_ids { - metrics::inc_counter( - &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_SAMPLING_REQUESTS, - ); - - if let Some((queued, delay_key)) = self.queued_sampling_requests.remove(&id) - { - // Remove the delay. - self.sampling_requests_delay_queue.remove(&delay_key); - - // Send the work. - let work = ReadyWork::SamplingRequest(queued); - - if self.ready_work_tx.try_send(work).is_err() { - failed_to_send_count += 1; - } else { - sent_count += 1; - } - } else { - // This should never happen. 
- error!(?block_root, ?id, "Unknown sampling request for block root"); - } - } - - if failed_to_send_count > 0 { - error!( - hint = "system may be overloaded", - ?block_root, - failed_to_send_count, - sent_count, - "Ignored scheduled sampling requests for block" - ); - } - } } InboundEvent::Msg(NewLightClientOptimisticUpdate { parent_root }) => { // Unqueue the light client optimistic updates we have for this root, if any. diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 3013596f9f..0f5fd99c27 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -3,7 +3,7 @@ use libp2p::PeerId; use std::fmt::{Display, Formatter}; use std::sync::Arc; use types::{ - BlobSidecar, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, + BlobSidecar, DataColumnSidecar, Epoch, EthSpec, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; @@ -87,9 +87,10 @@ pub enum RangeRequestId { BackfillSync { batch_id: Epoch }, } +// TODO(das) refactor in a separate PR. We might be able to remove this and replace +// [`DataColumnsByRootRequestId`] with a [`SingleLookupReqId`]. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum DataColumnsByRootRequester { - Sampling(SamplingId), Custody(CustodyId), } @@ -99,21 +100,6 @@ pub enum RangeRequester { BackfillSync { batch_id: Epoch }, } -#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub struct SamplingId { - pub id: SamplingRequester, - pub sampling_request_id: SamplingRequestId, -} - -#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub enum SamplingRequester { - ImportedBlock(Hash256), -} - -/// Identifier of sampling requests. 
-#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub struct SamplingRequestId(pub usize); - #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub struct CustodyId { pub requester: CustodyRequester, @@ -231,13 +217,11 @@ impl_display!(ComponentsByRangeRequestId, "{}/{}", id, requester); impl_display!(DataColumnsByRootRequestId, "{}/{}", id, requester); impl_display!(SingleLookupReqId, "{}/Lookup/{}", req_id, lookup_id); impl_display!(CustodyId, "{}", requester); -impl_display!(SamplingId, "{}/{}", sampling_request_id, id); impl Display for DataColumnsByRootRequester { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { Self::Custody(id) => write!(f, "Custody/{id}"), - Self::Sampling(id) => write!(f, "Sampling/{id}"), } } } @@ -257,20 +241,6 @@ impl Display for RangeRequestId { } } -impl Display for SamplingRequestId { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -impl Display for SamplingRequester { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - Self::ImportedBlock(block) => write!(f, "ImportedBlock/{block}"), - } - } -} - #[cfg(test)] mod tests { use super::*; @@ -289,18 +259,6 @@ mod tests { assert_eq!(format!("{id}"), "123/Custody/121/Lookup/101"); } - #[test] - fn display_id_data_columns_by_root_sampling() { - let id = DataColumnsByRootRequestId { - id: 123, - requester: DataColumnsByRootRequester::Sampling(SamplingId { - id: SamplingRequester::ImportedBlock(Hash256::ZERO), - sampling_request_id: SamplingRequestId(101), - }), - }; - assert_eq!(format!("{id}"), "123/Sampling/101/ImportedBlock/0x0000000000000000000000000000000000000000000000000000000000000000"); - } - #[test] fn display_id_data_columns_by_range() { let id = DataColumnsByRangeRequestId { diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 05c7dc287b..24a179fa80 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ 
-17,9 +17,6 @@ use strum::IntoEnumIterator; use types::DataColumnSubnetId; use types::EthSpec; -pub const SUCCESS: &str = "SUCCESS"; -pub const FAILURE: &str = "FAILURE"; - #[derive(Debug, AsRefStr)] pub(crate) enum BlockSource { Gossip, @@ -611,31 +608,6 @@ pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: LazyLock ) }); -/* - * Sampling - */ -pub static SAMPLE_DOWNLOAD_RESULT: LazyLock> = LazyLock::new(|| { - try_create_int_counter_vec( - "beacon_sampling_sample_verify_result_total", - "Total count of individual sample download results", - &["result"], - ) -}); -pub static SAMPLE_VERIFY_RESULT: LazyLock> = LazyLock::new(|| { - try_create_int_counter_vec( - "beacon_sampling_sample_verify_result_total", - "Total count of individual sample verify results", - &["result"], - ) -}); -pub static SAMPLING_REQUEST_RESULT: LazyLock> = LazyLock::new(|| { - try_create_int_counter_vec( - "beacon_sampling_request_result_total", - "Total count of sample request results", - &["result"], - ) -}); - pub fn register_finality_update_error(error: &LightClientFinalityUpdateError) { inc_counter_vec(&GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]); } @@ -683,13 +655,6 @@ pub(crate) fn register_process_result_metrics( } } -pub fn from_result(result: &std::result::Result) -> &str { - match result { - Ok(_) => SUCCESS, - Err(_) => FAILURE, - } -} - pub fn update_gossip_metrics( gossipsub: &Gossipsub, network_globals: &Arc>, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 0b17965f3c..47d1546506 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1436,21 +1436,6 @@ impl NetworkBeaconProcessor { let block = verified_block.block.block_cloned(); let block_root = verified_block.block_root; - // Note: okay to issue sampling request before the block is 
execution verified. If the - // proposer sends us a block with invalid blob transactions it can trigger us to issue - // sampling queries that will never resolve. This attack is equivalent to withholding data. - // Dismissed proposal to move this block to post-execution: https://github.com/sigp/lighthouse/pull/6492 - if block.num_expected_blobs() > 0 { - // Trigger sampling for block not yet execution valid. At this point column custodials are - // unlikely to have received their columns. Triggering sampling so early is only viable with - // either: - // - Sync delaying sampling until some latter window - // - Re-processing early sampling requests: https://github.com/sigp/lighthouse/pull/5569 - if self.chain.should_sample_slot(block.slot()) { - self.send_sync_message(SyncMessage::SampleBlock(block_root, block.slot())); - } - } - // Block is gossip valid. Attempt to fetch blobs from the EL using versioned hashes derived // from kzg commitments, without having to wait for all blobs to be sent from the peers. let publish_blobs = true; diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index f7c3a1bf8d..19305e05ff 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,5 +1,4 @@ use crate::sync::manager::BlockProcessType; -use crate::sync::SamplingId; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::RpcBlock; @@ -498,43 +497,6 @@ impl NetworkBeaconProcessor { }) } - /// Create a new `Work` event for some sampling columns, and reports the verification result - /// back to sync. 
- pub fn send_rpc_validate_data_columns( - self: &Arc, - block_root: Hash256, - data_columns: Vec>>, - seen_timestamp: Duration, - id: SamplingId, - ) -> Result<(), Error> { - let s = self.clone(); - self.try_send(BeaconWorkEvent { - drop_during_sync: false, - work: Work::RpcVerifyDataColumn(Box::pin(async move { - let result = s - .clone() - .validate_rpc_data_columns(block_root, data_columns, seen_timestamp) - .await; - // Sync handles these results - s.send_sync_message(SyncMessage::SampleVerified { id, result }); - })), - }) - } - - /// Create a new `Work` event with a block sampling completed result - pub fn send_sampling_completed( - self: &Arc, - block_root: Hash256, - ) -> Result<(), Error> { - let nbp = self.clone(); - self.try_send(BeaconWorkEvent { - drop_during_sync: false, - work: Work::SamplingResult(Box::pin(async move { - nbp.process_sampling_completed(block_root).await; - })), - }) - } - /// Create a new work event to import `blocks` as a beacon chain segment. pub fn send_chain_segment( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index cff6e26165..32c4705ea8 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -8,7 +8,6 @@ use crate::sync::{ use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::data_availability_checker::MaybeAvailableBlock; -use beacon_chain::data_column_verification::verify_kzg_for_data_column_list; use beacon_chain::{ validator_monitor::get_slot_delay_ms, AvailabilityProcessingStatus, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, @@ -25,7 +24,7 @@ use store::KzgCommitment; use tracing::{debug, error, info, warn}; use types::beacon_block_body::format_kzg_commitments; use 
types::blob_sidecar::FixedBlobSidecarList; -use types::{BlockImportSource, DataColumnSidecar, DataColumnSidecarList, Epoch, Hash256}; +use types::{BlockImportSource, DataColumnSidecarList, Epoch, Hash256}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. #[derive(Clone, Debug, PartialEq)] @@ -142,7 +141,6 @@ impl NetworkBeaconProcessor { }; let slot = block.slot(); - let block_has_data = block.as_block().num_expected_blobs() > 0; let parent_root = block.message().parent_root(); let commitments_formatted = block.as_block().commitments_formatted(); @@ -215,17 +213,6 @@ impl NetworkBeaconProcessor { _ => {} } - // RPC block imported or execution validated. If the block was already imported by gossip we - // receive Err(BlockError::AlreadyKnown). - if result.is_ok() && - // Block has at least one blob, so it produced columns - block_has_data && - // Block slot is within the DA boundary (should always be the case) and PeerDAS is activated - self.chain.should_sample_slot(slot) - { - self.send_sync_message(SyncMessage::SampleBlock(block_root, slot)); - } - // Sync handles these results self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, @@ -426,25 +413,6 @@ impl NetworkBeaconProcessor { }); } - /// Validate a list of data columns received from RPC requests - pub async fn validate_rpc_data_columns( - self: Arc>, - _block_root: Hash256, - data_columns: Vec>>, - _seen_timestamp: Duration, - ) -> Result<(), String> { - verify_kzg_for_data_column_list(data_columns.iter(), &self.chain.kzg) - .map_err(|err| format!("{err:?}")) - } - - /// Process a sampling completed event, inserting it into fork-choice - pub async fn process_sampling_completed( - self: Arc>, - block_root: Hash256, - ) { - self.chain.process_sampling_completed(block_root).await; - } - /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. 
pub async fn process_chain_segment( @@ -570,15 +538,6 @@ impl NetworkBeaconProcessor { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); if !imported_blocks.is_empty() { self.chain.recompute_head_at_current_slot().await; - - for (block_root, block_slot) in &imported_blocks { - if self.chain.should_sample_slot(*block_slot) { - self.send_sync_message(SyncMessage::SampleBlock( - *block_root, - *block_slot, - )); - } - } } (imported_blocks.len(), Ok(())) } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 81b22b99e8..944f55dba1 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,7 +38,6 @@ use super::block_lookups::BlockLookups; use super::network_context::{ CustodyByRootResult, RangeBlockComponent, RangeRequestId, RpcEvent, SyncNetworkContext, }; -use super::peer_sampling::{Sampling, SamplingConfig, SamplingResult}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; @@ -58,7 +57,7 @@ use lighthouse_network::rpc::RPCError; use lighthouse_network::service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyRequester, DataColumnsByRangeRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, - SamplingId, SamplingRequester, SingleLookupReqId, SyncRequestId, + SingleLookupReqId, SyncRequestId, }; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; @@ -69,14 +68,11 @@ use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use tracing::{debug, error, info, info_span, trace, warn, Instrument}; +use tracing::{debug, error, info, info_span, trace, Instrument}; use types::{ BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, 
Slot, }; -#[cfg(test)] -use types::ColumnIndex; - /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a /// fully sync'd peer. @@ -146,10 +142,6 @@ pub enum SyncMessage { /// manager to attempt to find the block matching the unknown hash. UnknownBlockHashFromAttestation(PeerId, Hash256), - /// Request to start sampling a block. Caller should ensure that block has data before sending - /// the request. - SampleBlock(Hash256, Slot), - /// A peer has disconnected. Disconnect(PeerId), @@ -172,12 +164,6 @@ pub enum SyncMessage { result: BlockProcessingResult, }, - /// Sample data column verified - SampleVerified { - id: SamplingId, - result: Result<(), String>, - }, - /// A block from gossip has completed processing, GossipBlockProcessResult { block_root: Hash256, imported: bool }, } @@ -248,8 +234,6 @@ pub struct SyncManager { /// may forward us thousands of a attestations, each one triggering an individual event. 
Only /// one event is useful, the rest generating log noise and wasted cycles notified_unknown_roots: LRUTimeCache<(PeerId, Hash256)>, - - sampling: Sampling, } /// Spawns a new `SyncManager` thread which has a weak reference to underlying beacon @@ -274,7 +258,6 @@ pub fn spawn( network_send, beacon_processor, sync_recv, - SamplingConfig::Default, fork_context, ); @@ -296,7 +279,6 @@ impl SyncManager { network_send: mpsc::UnboundedSender>, beacon_processor: Arc>, sync_recv: mpsc::UnboundedReceiver>, - sampling_config: SamplingConfig, fork_context: Arc, ) -> Self { let network_globals = beacon_processor.network_globals.clone(); @@ -315,7 +297,6 @@ impl SyncManager { notified_unknown_roots: LRUTimeCache::new(Duration::from_secs( NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS, )), - sampling: Sampling::new(sampling_config), } } @@ -360,20 +341,6 @@ impl SyncManager { self.block_lookups.insert_failed_chain(block_root); } - #[cfg(test)] - pub(crate) fn active_sampling_requests(&self) -> Vec { - self.sampling.active_sampling_requests() - } - - #[cfg(test)] - pub(crate) fn get_sampling_request_status( - &self, - block_root: Hash256, - index: &ColumnIndex, - ) -> Option { - self.sampling.get_request_status(block_root, index) - } - #[cfg(test)] pub(crate) fn update_execution_engine_state(&mut self, state: EngineState) { self.handle_new_execution_engine_state(state); @@ -853,15 +820,6 @@ impl SyncManager { self.handle_unknown_block_root(peer_id, block_root); } } - SyncMessage::SampleBlock(block_root, block_slot) => { - debug!(%block_root, slot = %block_slot, "Received SampleBlock message"); - if let Some((requester, result)) = self - .sampling - .on_new_sample_request(block_root, &mut self.network) - { - self.on_sampling_result(requester, result) - } - } SyncMessage::Disconnect(peer_id) => { debug!(%peer_id, "Received disconnected message"); self.peer_disconnect(&peer_id); @@ -911,14 +869,6 @@ impl SyncManager { } } }, - SyncMessage::SampleVerified { id, result } => { - if let 
Some((requester, result)) = - self.sampling - .on_sample_verified(id, result, &mut self.network) - { - self.on_sampling_result(requester, result) - } - } } } @@ -1175,14 +1125,6 @@ impl SyncManager { .on_data_columns_by_root_response(req_id, peer_id, data_column) { match req_id.requester { - DataColumnsByRootRequester::Sampling(id) => { - if let Some((requester, result)) = - self.sampling - .on_sample_downloaded(id, peer_id, resp, &mut self.network) - { - self.on_sampling_result(requester, result) - } - } DataColumnsByRootRequester::Custody(custody_id) => { if let Some(result) = self .network @@ -1256,31 +1198,6 @@ impl SyncManager { ); } - fn on_sampling_result(&mut self, requester: SamplingRequester, result: SamplingResult) { - match requester { - SamplingRequester::ImportedBlock(block_root) => { - debug!(%block_root, ?result, "Sampling result"); - - match result { - Ok(_) => { - // Notify the fork-choice of a successful sampling result to mark the block - // branch as safe. - if let Err(e) = self - .network - .beacon_processor() - .send_sampling_completed(block_root) - { - warn!(?block_root, reason = ?e, "Error sending sampling result"); - } - } - Err(e) => { - warn!(?block_root, reason = ?e, "Sampling failed"); - } - } - } - } - } - /// Handles receiving a response for a range sync request that should have both blocks and /// blobs. 
fn on_range_components_response( diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 0f5fd6fb9f..4dab2e17d3 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -6,12 +6,10 @@ mod block_lookups; mod block_sidecar_coupling; pub mod manager; mod network_context; -mod peer_sampling; mod peer_sync_info; mod range_sync; #[cfg(test)] mod tests; -pub use lighthouse_network::service::api_types::SamplingId; pub use manager::{BatchProcessResult, SyncMessage}; pub use range_sync::{BatchOperationOutcome, ChainId}; diff --git a/beacon_node/network/src/sync/peer_sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs deleted file mode 100644 index 4ad77176aa..0000000000 --- a/beacon_node/network/src/sync/peer_sampling.rs +++ /dev/null @@ -1,735 +0,0 @@ -use self::request::ActiveColumnSampleRequest; -#[cfg(test)] -pub(crate) use self::request::Status; -use super::network_context::{ - DataColumnsByRootSingleBlockRequest, RpcResponseError, SyncNetworkContext, -}; -use crate::metrics; -use beacon_chain::BeaconChainTypes; -use fnv::FnvHashMap; -use lighthouse_network::service::api_types::{ - DataColumnsByRootRequester, SamplingId, SamplingRequestId, SamplingRequester, -}; -use lighthouse_network::{PeerAction, PeerId}; -use rand::{seq::SliceRandom, thread_rng}; -use std::{ - collections::hash_map::Entry, collections::HashMap, marker::PhantomData, sync::Arc, - time::Duration, -}; -use tracing::{debug, error, instrument, warn}; -use types::{data_column_sidecar::ColumnIndex, ChainSpec, DataColumnSidecar, Hash256}; - -pub type SamplingResult = Result<(), SamplingError>; - -type DataColumnSidecarList = Vec>>; - -pub struct Sampling { - requests: HashMap>, - sampling_config: SamplingConfig, -} - -impl Sampling { - #[instrument(parent = None, fields(service = "sampling"), name = "sampling")] - pub fn new(sampling_config: SamplingConfig) -> Self { - Self { - requests: <_>::default(), - sampling_config, - } - 
} - - #[cfg(test)] - #[instrument(parent = None, - fields(service = "sampling"), - name = "sampling", - skip_all - )] - pub fn active_sampling_requests(&self) -> Vec { - self.requests.values().map(|r| r.block_root).collect() - } - - #[cfg(test)] - #[instrument(parent = None, - fields(service = "sampling"), - name = "sampling", - skip_all - )] - pub fn get_request_status( - &self, - block_root: Hash256, - index: &ColumnIndex, - ) -> Option { - let requester = SamplingRequester::ImportedBlock(block_root); - self.requests - .get(&requester) - .and_then(|req| req.get_request_status(index)) - } - - /// Create a new sampling request for a known block - /// - /// ### Returns - /// - /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. - /// - `None`: Request still active, requester should do no action - #[instrument(parent = None, - fields(service = "sampling"), - name = "sampling", - skip_all - )] - pub fn on_new_sample_request( - &mut self, - block_root: Hash256, - cx: &mut SyncNetworkContext, - ) -> Option<(SamplingRequester, SamplingResult)> { - let id = SamplingRequester::ImportedBlock(block_root); - - let request = match self.requests.entry(id) { - Entry::Vacant(e) => e.insert(ActiveSamplingRequest::new( - block_root, - id, - &self.sampling_config, - &cx.chain.spec, - )), - Entry::Occupied(_) => { - // Sampling is triggered from multiple sources, duplicate sampling requests are - // likely (gossip block + gossip data column) - // TODO(das): Should track failed sampling request for some time? Otherwise there's - // a risk of a loop with multiple triggers creating the request, then failing, - // and repeat. 
- debug!(?id, "Ignoring duplicate sampling request"); - return None; - } - }; - - debug!( - ?id, - column_selection = ?request.column_selection(), - "Created new sample request" - ); - - // TOOD(das): If a node has very little peers, continue_sampling() will attempt to find enough - // to sample here, immediately failing the sampling request. There should be some grace - // period to allow the peer manager to find custody peers. - let result = request.continue_sampling(cx); - self.handle_sampling_result(result, &id) - } - - /// Insert a downloaded column into an active sampling request. Then make progress on the - /// entire request. - /// - /// ### Returns - /// - /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. - /// - `None`: Request still active, requester should do no action - #[instrument(parent = None, - fields(service = "sampling"), - name = "sampling", - skip_all - )] - pub fn on_sample_downloaded( - &mut self, - id: SamplingId, - peer_id: PeerId, - resp: Result<(DataColumnSidecarList, Duration), RpcResponseError>, - cx: &mut SyncNetworkContext, - ) -> Option<(SamplingRequester, SamplingResult)> { - let Some(request) = self.requests.get_mut(&id.id) else { - // TOOD(das): This log can happen if the request is error'ed early and dropped - debug!(?id, "Sample downloaded event for unknown request"); - return None; - }; - - let result = request.on_sample_downloaded(peer_id, id.sampling_request_id, resp, cx); - self.handle_sampling_result(result, &id.id) - } - - /// Insert a downloaded column into an active sampling request. Then make progress on the - /// entire request. - /// - /// ### Returns - /// - /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. 
- /// - `None`: Request still active, requester should do no action - #[instrument(parent = None, - fields(service = "sampling"), - name = "sampling", - skip_all - )] - pub fn on_sample_verified( - &mut self, - id: SamplingId, - result: Result<(), String>, - cx: &mut SyncNetworkContext, - ) -> Option<(SamplingRequester, SamplingResult)> { - let Some(request) = self.requests.get_mut(&id.id) else { - // TOOD(das): This log can happen if the request is error'ed early and dropped - debug!(?id, "Sample verified event for unknown request"); - return None; - }; - - let result = request.on_sample_verified(id.sampling_request_id, result, cx); - self.handle_sampling_result(result, &id.id) - } - - /// Converts a result from the internal format of `ActiveSamplingRequest` (error first to use ? - /// conveniently), to an Option first format to use an `if let Some() { act on result }` pattern - /// in the sync manager. - #[instrument(parent = None, - fields(service = "sampling"), - name = "sampling", - skip_all - )] - fn handle_sampling_result( - &mut self, - result: Result, SamplingError>, - id: &SamplingRequester, - ) -> Option<(SamplingRequester, SamplingResult)> { - let result = result.transpose(); - if let Some(result) = result { - debug!(?id, ?result, "Sampling request completed, removing"); - metrics::inc_counter_vec( - &metrics::SAMPLING_REQUEST_RESULT, - &[metrics::from_result(&result)], - ); - self.requests.remove(id); - Some((*id, result)) - } else { - None - } - } -} - -pub struct ActiveSamplingRequest { - block_root: Hash256, - requester_id: SamplingRequester, - column_requests: FnvHashMap, - /// Mapping of column indexes for a sampling request. - column_indexes_by_sampling_request: FnvHashMap>, - /// Sequential ID for sampling requests. 
- current_sampling_request_id: SamplingRequestId, - column_shuffle: Vec, - required_successes: Vec, - _phantom: PhantomData, -} - -#[derive(Debug)] -pub enum SamplingError { - SendFailed(#[allow(dead_code)] &'static str), - ProcessorUnavailable, - TooManyFailures, - BadState(#[allow(dead_code)] String), - ColumnIndexOutOfBounds, -} - -/// Required success index by current failures, with p_target=5.00E-06 -/// Ref: https://colab.research.google.com/drive/18uUgT2i-m3CbzQ5TyP9XFKqTn1DImUJD#scrollTo=E82ITcgB5ATh -const REQUIRED_SUCCESSES: [usize; 11] = [16, 20, 23, 26, 29, 32, 34, 37, 39, 42, 44]; - -#[derive(Debug, Clone)] -pub enum SamplingConfig { - Default, - #[allow(dead_code)] - Custom { - required_successes: Vec, - }, -} - -impl ActiveSamplingRequest { - fn new( - block_root: Hash256, - requester_id: SamplingRequester, - sampling_config: &SamplingConfig, - spec: &ChainSpec, - ) -> Self { - // Select ahead of time the full list of to-sample columns - let mut column_shuffle = - (0..spec.number_of_columns as ColumnIndex).collect::>(); - let mut rng = thread_rng(); - column_shuffle.shuffle(&mut rng); - - Self { - block_root, - requester_id, - column_requests: <_>::default(), - column_indexes_by_sampling_request: <_>::default(), - current_sampling_request_id: SamplingRequestId(0), - column_shuffle, - required_successes: match sampling_config { - SamplingConfig::Default => REQUIRED_SUCCESSES.to_vec(), - SamplingConfig::Custom { required_successes } => required_successes.clone(), - }, - _phantom: PhantomData, - } - } - - #[cfg(test)] - pub fn get_request_status(&self, index: &ColumnIndex) -> Option { - self.column_requests.get(index).map(|req| req.status()) - } - - /// Return the current ordered list of columns that this requests has to sample to succeed - pub(crate) fn column_selection(&self) -> Vec { - self.column_shuffle - .iter() - .take(REQUIRED_SUCCESSES[0]) - .copied() - .collect() - } - - /// Insert a downloaded column into an active sampling request. 
Then make progress on the - /// entire request. - /// - /// ### Returns - /// - /// - `Err`: Sampling request has failed and will be dropped - /// - `Ok(Some)`: Sampling request has successfully completed and will be dropped - /// - `Ok(None)`: Sampling request still active - pub(crate) fn on_sample_downloaded( - &mut self, - _peer_id: PeerId, - sampling_request_id: SamplingRequestId, - resp: Result<(DataColumnSidecarList, Duration), RpcResponseError>, - cx: &mut SyncNetworkContext, - ) -> Result, SamplingError> { - // Select columns to sample - // Create individual request per column - // Progress requests - // If request fails retry or expand search - // If all good return - let Some(column_indexes) = self - .column_indexes_by_sampling_request - .get(&sampling_request_id) - else { - error!( - ?sampling_request_id, - "Column indexes for the sampling request ID not found" - ); - return Ok(None); - }; - - match resp { - Ok((mut resp_data_columns, seen_timestamp)) => { - let resp_column_indexes = resp_data_columns - .iter() - .map(|r| r.index) - .collect::>(); - debug!( - block_root = %self.block_root, - column_indexes = ?resp_column_indexes, - count = resp_data_columns.len(), - "Sample download success" - ); - metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::SUCCESS]); - - // Filter the data received in the response using the requested column indexes. - let mut data_columns = vec![]; - for column_index in column_indexes { - let Some(request) = self.column_requests.get_mut(column_index) else { - warn!( - block_root = %self.block_root, - column_index, - "Active column sample request not found" - ); - continue; - }; - - let Some(data_pos) = resp_data_columns - .iter() - .position(|data| &data.index == column_index) - else { - // Peer does not have the requested data, mark peer as "dont have" and try - // again with a different peer. 
- debug!( - block_root = %self.block_root, - column_index, - "Sampling peer claims to not have the data" - ); - request.on_sampling_error()?; - continue; - }; - - data_columns.push(resp_data_columns.swap_remove(data_pos)); - } - - if !resp_data_columns.is_empty() { - let resp_column_indexes = resp_data_columns - .iter() - .map(|d| d.index) - .collect::>(); - debug!( - block_root = %self.block_root, - column_indexes = ?resp_column_indexes, - "Received data that was not requested" - ); - } - - // Handle the downloaded data columns. - if data_columns.is_empty() { - debug!(block_root = %self.block_root, "Received empty response"); - self.column_indexes_by_sampling_request - .remove(&sampling_request_id); - } else { - // Overwrite `column_indexes` with the column indexes received in the response. - let column_indexes = data_columns.iter().map(|d| d.index).collect::>(); - self.column_indexes_by_sampling_request - .insert(sampling_request_id, column_indexes.clone()); - // Peer has data column, send to verify - let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { - // If processor is not available, error the entire sampling - debug!( - block = %self.block_root, - reason = "beacon processor unavailable", - "Dropping sampling" - ); - return Err(SamplingError::ProcessorUnavailable); - }; - debug!( - block = ?self.block_root, - ?column_indexes, - "Sending data_column for verification" - ); - if let Err(e) = beacon_processor.send_rpc_validate_data_columns( - self.block_root, - data_columns, - seen_timestamp, - SamplingId { - id: self.requester_id, - sampling_request_id, - }, - ) { - // Beacon processor is overloaded, drop sampling attempt. Failing to sample - // is not a permanent state so we should recover once the node has capacity - // and receives a descendant block. 
- error!( - block = %self.block_root, - reason = e.to_string(), - "Dropping sampling" - ); - return Err(SamplingError::SendFailed("beacon processor send failure")); - } - } - } - Err(err) => { - debug!( - block_root = %self.block_root, - ?column_indexes, - error = ?err, - "Sample download error" - ); - metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::FAILURE]); - - // Error downloading, malicious network errors are already penalized before - // reaching this function. Mark the peer as failed and try again with another. - for column_index in column_indexes { - let Some(request) = self.column_requests.get_mut(column_index) else { - warn!( - block_root = %self.block_root, - column_index, - "Active column sample request not found" - ); - continue; - }; - request.on_sampling_error()?; - } - } - }; - - self.continue_sampling(cx) - } - - /// Insert a column verification result into an active sampling request. Then make progress - /// on the entire request. - /// - /// ### Returns - /// - /// - `Err`: Sampling request has failed and will be dropped - /// - `Ok(Some)`: Sampling request has successfully completed and will be dropped - /// - `Ok(None)`: Sampling request still active - pub(crate) fn on_sample_verified( - &mut self, - sampling_request_id: SamplingRequestId, - result: Result<(), String>, - cx: &mut SyncNetworkContext, - ) -> Result, SamplingError> { - let Some(column_indexes) = self - .column_indexes_by_sampling_request - .get(&sampling_request_id) - else { - error!( - ?sampling_request_id, - "Column indexes for the sampling request ID not found" - ); - return Ok(None); - }; - - match result { - Ok(_) => { - debug!(block_root = %self.block_root,?column_indexes, "Sample verification success"); - metrics::inc_counter_vec(&metrics::SAMPLE_VERIFY_RESULT, &[metrics::SUCCESS]); - - // Valid, continue_sampling will maybe consider sampling succees - for column_index in column_indexes { - let Some(request) = self.column_requests.get_mut(column_index) 
else { - warn!( - block_root = %self.block_root, column_index, - "Active column sample request not found" - ); - continue; - }; - request.on_sampling_success()?; - } - } - Err(err) => { - debug!(block_root = %self.block_root, ?column_indexes, reason = ?err, "Sample verification failure"); - metrics::inc_counter_vec(&metrics::SAMPLE_VERIFY_RESULT, &[metrics::FAILURE]); - - // Peer sent invalid data, penalize and try again from different peer - // TODO(das): Count individual failures - for column_index in column_indexes { - let Some(request) = self.column_requests.get_mut(column_index) else { - warn!( - block_root = %self.block_root, - column_index, - "Active column sample request not found" - ); - continue; - }; - let peer_id = request.on_sampling_error()?; - cx.report_peer( - peer_id, - PeerAction::LowToleranceError, - "invalid data column", - ); - } - } - } - - self.continue_sampling(cx) - } - - pub(crate) fn continue_sampling( - &mut self, - cx: &mut SyncNetworkContext, - ) -> Result, SamplingError> { - // First check if sampling is completed, by computing `required_successes` - let mut successes = 0; - let mut failures = 0; - let mut ongoings = 0; - - for request in self.column_requests.values() { - if request.is_completed() { - successes += 1; - } - if request.is_failed() { - failures += 1; - } - if request.is_ongoing() { - ongoings += 1; - } - } - - // If there are too many failures, consider the sampling failed - let Some(required_successes) = self.required_successes.get(failures) else { - return Err(SamplingError::TooManyFailures); - }; - - // If there are enough successes, consider the sampling complete - if successes >= *required_successes { - return Ok(Some(())); - } - - // First, attempt to progress sampling by requesting more columns, so that request failures - // are accounted for below. - - // Group the requested column indexes by the destination peer to batch sampling requests. 
- let mut column_indexes_to_request = FnvHashMap::default(); - for idx in 0..*required_successes { - // Re-request columns. Note: out of bounds error should never happen, inputs are hardcoded - let column_index = *self - .column_shuffle - .get(idx) - .ok_or(SamplingError::ColumnIndexOutOfBounds)?; - let request = self - .column_requests - .entry(column_index) - .or_insert(ActiveColumnSampleRequest::new(column_index)); - - if request.is_ready_to_request() { - if let Some(peer_id) = request.choose_peer(cx) { - let indexes = column_indexes_to_request.entry(peer_id).or_insert(vec![]); - indexes.push(column_index); - } - } - } - - // Send requests. - let mut sent_request = false; - for (peer_id, column_indexes) in column_indexes_to_request { - cx.data_column_lookup_request( - DataColumnsByRootRequester::Sampling(SamplingId { - id: self.requester_id, - sampling_request_id: self.current_sampling_request_id, - }), - peer_id, - DataColumnsByRootSingleBlockRequest { - block_root: self.block_root, - indices: column_indexes.clone(), - }, - // false = We issue request to custodians who may or may not have received the - // samples yet. We don't any signal (like an attestation or status messages that the - // custodian has received data). - false, - ) - .map_err(SamplingError::SendFailed)?; - self.column_indexes_by_sampling_request - .insert(self.current_sampling_request_id, column_indexes.clone()); - self.current_sampling_request_id.0 += 1; - sent_request = true; - - // Update request status. - for column_index in column_indexes { - let Some(request) = self.column_requests.get_mut(&column_index) else { - continue; - }; - request.on_start_sampling(peer_id)?; - } - } - - // Make sure that sampling doesn't stall, by ensuring that this sampling request will - // receive a new event of some type. If there are no ongoing requests, and no new - // request was sent, loop to increase the required_successes until the sampling fails if - // there are no peers. 
- if ongoings == 0 && !sent_request { - debug!(block_root = %self.block_root, "Sampling request stalled"); - } - - Ok(None) - } -} - -mod request { - use super::SamplingError; - use crate::sync::network_context::SyncNetworkContext; - use beacon_chain::BeaconChainTypes; - use lighthouse_network::PeerId; - use rand::seq::SliceRandom; - use rand::thread_rng; - use std::collections::HashSet; - use types::data_column_sidecar::ColumnIndex; - - pub(crate) struct ActiveColumnSampleRequest { - column_index: ColumnIndex, - status: Status, - // TODO(das): Should downscore peers that claim to not have the sample? - peers_dont_have: HashSet, - } - - // Exposed only for testing assertions in lookup tests - #[derive(Debug, Clone)] - pub(crate) enum Status { - NoPeers, - NotStarted, - Sampling(PeerId), - Verified, - } - - impl ActiveColumnSampleRequest { - pub(crate) fn new(column_index: ColumnIndex) -> Self { - Self { - column_index, - status: Status::NotStarted, - peers_dont_have: <_>::default(), - } - } - - pub(crate) fn is_completed(&self) -> bool { - match self.status { - Status::NoPeers | Status::NotStarted | Status::Sampling(_) => false, - Status::Verified => true, - } - } - - pub(crate) fn is_failed(&self) -> bool { - match self.status { - Status::NotStarted | Status::Sampling(_) | Status::Verified => false, - Status::NoPeers => true, - } - } - - pub(crate) fn is_ongoing(&self) -> bool { - match self.status { - Status::NotStarted | Status::NoPeers | Status::Verified => false, - Status::Sampling(_) => true, - } - } - - pub(crate) fn is_ready_to_request(&self) -> bool { - match self.status { - Status::NoPeers | Status::NotStarted => true, - Status::Sampling(_) | Status::Verified => false, - } - } - - #[cfg(test)] - pub(crate) fn status(&self) -> Status { - self.status.clone() - } - - pub(crate) fn choose_peer( - &mut self, - cx: &SyncNetworkContext, - ) -> Option { - // TODO: When is a fork and only a subset of your peers know about a block, sampling should only - // be 
queried on the peers on that fork. Should this case be handled? How to handle it? - let mut peer_ids = cx.get_custodial_peers(self.column_index); - - peer_ids.retain(|peer_id| !self.peers_dont_have.contains(peer_id)); - - if let Some(peer_id) = peer_ids.choose(&mut thread_rng()) { - Some(*peer_id) - } else { - self.status = Status::NoPeers; - None - } - } - - pub(crate) fn on_start_sampling(&mut self, peer_id: PeerId) -> Result<(), SamplingError> { - match self.status.clone() { - Status::NoPeers | Status::NotStarted => { - self.status = Status::Sampling(peer_id); - Ok(()) - } - other => Err(SamplingError::BadState(format!( - "bad state on_start_sampling expected NoPeers|NotStarted got {other:?}. column_index:{}", - self.column_index - ))), - } - } - - pub(crate) fn on_sampling_error(&mut self) -> Result { - match self.status.clone() { - Status::Sampling(peer_id) => { - self.peers_dont_have.insert(peer_id); - self.status = Status::NotStarted; - Ok(peer_id) - } - other => Err(SamplingError::BadState(format!( - "bad state on_sampling_error expected Sampling got {other:?}. column_index:{}", - self.column_index - ))), - } - } - - pub(crate) fn on_sampling_success(&mut self) -> Result<(), SamplingError> { - match &self.status { - Status::Sampling(_) => { - self.status = Status::Verified; - Ok(()) - } - other => Err(SamplingError::BadState(format!( - "bad state on_sampling_success expected Sampling got {other:?}. 
column_index:{}", - self.column_index - ))), - } - } - } -} diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index a2c359c87e..0dcc29ef58 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -4,8 +4,7 @@ use crate::sync::block_lookups::{ }; use crate::sync::{ manager::{BlockProcessType, BlockProcessingResult, SyncManager}, - peer_sampling::SamplingConfig, - SamplingId, SyncMessage, + SyncMessage, }; use crate::NetworkMessage; use std::sync::Arc; @@ -33,7 +32,7 @@ use lighthouse_network::{ rpc::{RPCError, RequestType, RpcErrorResponse}, service::api_types::{ AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, - SamplingRequester, SingleLookupReqId, SyncRequestId, + SingleLookupReqId, SyncRequestId, }, types::SyncState, NetworkConfig, NetworkGlobals, PeerId, @@ -50,7 +49,6 @@ use types::{ const D: Duration = Duration::new(0, 0); const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; -const SAMPLING_REQUIRED_SUCCESSES: usize = 2; type DCByRootIds = Vec; type DCByRootId = (SyncRequestId, Vec); @@ -124,9 +122,6 @@ impl TestRig { beacon_processor.into(), // Pass empty recv not tied to any tx mpsc::unbounded_channel().1, - SamplingConfig::Custom { - required_successes: vec![SAMPLING_REQUIRED_SUCCESSES], - }, fork_context, ), harness, @@ -180,10 +175,6 @@ impl TestRig { )); } - fn trigger_sample_block(&mut self, block_root: Hash256, block_slot: Slot) { - self.send_sync_message(SyncMessage::SampleBlock(block_root, block_slot)) - } - /// Drain all sync messages in the sync_rx attached to the beacon processor fn drain_sync_rx(&mut self) { while let Ok(sync_message) = self.sync_rx.try_recv() { @@ -260,27 +251,6 @@ impl TestRig { ); } - fn expect_no_active_sampling(&mut self) { - assert_eq!( - self.sync_manager.active_sampling_requests(), - Vec::::new(), - "expected no active sampling" - ); - } - - fn 
expect_active_sampling(&mut self, block_root: &Hash256) { - assert!(self - .sync_manager - .active_sampling_requests() - .contains(block_root)); - } - - fn expect_clean_finished_sampling(&mut self) { - self.expect_empty_network(); - self.expect_sampling_result_work(); - self.expect_no_active_sampling(); - } - fn assert_parent_lookups_count(&self, count: usize) { assert_eq!( self.active_parent_lookups_count(), @@ -613,39 +583,6 @@ impl TestRig { }) } - fn return_empty_sampling_requests(&mut self, ids: DCByRootIds) { - for id in ids { - self.log(&format!("return empty data column for {id:?}")); - self.return_empty_sampling_request(id) - } - } - - fn return_empty_sampling_request(&mut self, (sync_request_id, _): DCByRootId) { - let peer_id = PeerId::random(); - // Send stream termination - self.send_sync_message(SyncMessage::RpcDataColumn { - sync_request_id, - peer_id, - data_column: None, - seen_timestamp: timestamp_now(), - }); - } - - fn sampling_requests_failed( - &mut self, - sampling_ids: DCByRootIds, - peer_id: PeerId, - error: RPCError, - ) { - for (sync_request_id, _) in sampling_ids { - self.send_sync_message(SyncMessage::RpcError { - peer_id, - sync_request_id, - error: error.clone(), - }) - } - } - fn complete_valid_block_request( &mut self, id: SingleLookupReqId, @@ -672,51 +609,6 @@ impl TestRig { ) } - fn complete_valid_sampling_column_requests( - &mut self, - ids: DCByRootIds, - data_columns: Vec>>, - ) { - for id in ids { - self.log(&format!("return valid data column for {id:?}")); - let indices = &id.1; - let columns_to_send = indices - .iter() - .map(|&i| data_columns[i as usize].clone()) - .collect::>(); - self.complete_valid_sampling_column_request(id, &columns_to_send); - } - } - - fn complete_valid_sampling_column_request( - &mut self, - id: DCByRootId, - data_columns: &[Arc>], - ) { - let first_dc = data_columns.first().unwrap(); - let block_root = first_dc.block_root(); - let sampling_request_id = match id.0 { - 
SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { - requester: DataColumnsByRootRequester::Sampling(sampling_id), - .. - }) => sampling_id.sampling_request_id, - _ => unreachable!(), - }; - self.complete_data_columns_by_root_request(id, data_columns); - - // Expect work event - self.expect_rpc_sample_verify_work_event(); - - // Respond with valid result - self.send_sync_message(SyncMessage::SampleVerified { - id: SamplingId { - id: SamplingRequester::ImportedBlock(block_root), - sampling_request_id, - }, - result: Ok(()), - }) - } - fn complete_valid_custody_request( &mut self, ids: DCByRootIds, @@ -1047,28 +939,7 @@ impl TestRig { .unwrap_or_else(|e| panic!("Expected RPC custody column work: {e}")) } - fn expect_rpc_sample_verify_work_event(&mut self) { - self.pop_received_processor_event(|ev| { - if ev.work_type() == beacon_processor::WorkType::RpcVerifyDataColumn { - Some(()) - } else { - None - } - }) - .unwrap_or_else(|e| panic!("Expected sample verify work: {e}")) - } - - fn expect_sampling_result_work(&mut self) { - self.pop_received_processor_event(|ev| { - if ev.work_type() == beacon_processor::WorkType::SamplingResult { - Some(()) - } else { - None - } - }) - .unwrap_or_else(|e| panic!("Expected sampling result work: {e}")) - } - + #[allow(dead_code)] fn expect_no_work_event(&mut self) { self.drain_processor_rx(); assert!(self.network_rx_queue.is_empty()); @@ -1280,46 +1151,6 @@ impl TestRig { imported: false, }); } - - fn assert_sampling_request_ongoing(&self, block_root: Hash256, indices: &[ColumnIndex]) { - for index in indices { - let status = self - .sync_manager - .get_sampling_request_status(block_root, index) - .unwrap_or_else(|| panic!("No request state for {index}")); - if !matches!(status, crate::sync::peer_sampling::Status::Sampling { .. 
}) { - panic!("expected {block_root} {index} request to be on going: {status:?}"); - } - } - } - - fn assert_sampling_request_nopeers(&self, block_root: Hash256, indices: &[ColumnIndex]) { - for index in indices { - let status = self - .sync_manager - .get_sampling_request_status(block_root, index) - .unwrap_or_else(|| panic!("No request state for {index}")); - if !matches!(status, crate::sync::peer_sampling::Status::NoPeers) { - panic!("expected {block_root} {index} request to be no peers: {status:?}"); - } - } - } - - fn log_sampling_requests(&self, block_root: Hash256, indices: &[ColumnIndex]) { - let statuses = indices - .iter() - .map(|index| { - let status = self - .sync_manager - .get_sampling_request_status(block_root, index) - .unwrap_or_else(|| panic!("No request state for {index}")); - (index, status) - }) - .collect::>(); - self.log(&format!( - "Sampling request status for {block_root}: {statuses:?}" - )); - } } #[test] @@ -2074,137 +1905,6 @@ fn blobs_in_da_checker_skip_download() { r.expect_no_active_lookups(); } -#[test] -fn sampling_happy_path() { - let Some(mut r) = TestRig::test_setup_after_fulu() else { - return; - }; - r.new_connected_peers_for_peerdas(); - let (block, data_columns) = r.rand_block_and_data_columns(); - let block_root = block.canonical_root(); - r.trigger_sample_block(block_root, block.slot()); - // Retrieve all outgoing sample requests for random column indexes - let sampling_ids = - r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); - // Resolve all of them one by one - r.complete_valid_sampling_column_requests(sampling_ids, data_columns); - r.expect_clean_finished_sampling(); -} - -#[test] -fn sampling_with_retries() { - let Some(mut r) = TestRig::test_setup_after_fulu() else { - return; - }; - r.new_connected_peers_for_peerdas(); - // Add another supernode to ensure that the node can retry. 
- r.new_connected_supernode_peer(); - let (block, data_columns) = r.rand_block_and_data_columns(); - let block_root = block.canonical_root(); - r.trigger_sample_block(block_root, block.slot()); - // Retrieve all outgoing sample requests for random column indexes, and return empty responses - let sampling_ids = - r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); - r.return_empty_sampling_requests(sampling_ids); - // Expect retries for all of them, and resolve them - let sampling_ids = - r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); - r.complete_valid_sampling_column_requests(sampling_ids, data_columns); - r.expect_clean_finished_sampling(); -} - -#[test] -fn sampling_avoid_retrying_same_peer() { - let Some(mut r) = TestRig::test_setup_after_fulu() else { - return; - }; - let peer_id_1 = r.new_connected_supernode_peer(); - let peer_id_2 = r.new_connected_supernode_peer(); - let block_root = Hash256::random(); - r.trigger_sample_block(block_root, Slot::new(0)); - // Retrieve all outgoing sample requests for random column indexes, and return empty responses - let sampling_ids = - r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); - r.sampling_requests_failed(sampling_ids, peer_id_1, RPCError::Disconnected); - // Should retry the other peer - let sampling_ids = - r.expect_only_data_columns_by_root_requests(block_root, SAMPLING_REQUIRED_SUCCESSES); - r.sampling_requests_failed(sampling_ids, peer_id_2, RPCError::Disconnected); - // Expect no more retries - r.expect_empty_network(); -} - -#[test] -fn sampling_batch_requests() { - let Some(mut r) = TestRig::test_setup_after_fulu() else { - return; - }; - let _supernode = r.new_connected_supernode_peer(); - let (block, data_columns) = r.rand_block_and_data_columns(); - let block_root = block.canonical_root(); - r.trigger_sample_block(block_root, block.slot()); - - // Retrieve the sample request, which should be 
batched. - let (sync_request_id, column_indexes) = r - .expect_only_data_columns_by_root_requests(block_root, 1) - .pop() - .unwrap(); - assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); - r.assert_sampling_request_ongoing(block_root, &column_indexes); - - // Resolve the request. - r.complete_valid_sampling_column_requests( - vec![(sync_request_id, column_indexes.clone())], - data_columns, - ); - r.expect_clean_finished_sampling(); -} - -#[test] -fn sampling_batch_requests_not_enough_responses_returned() { - let Some(mut r) = TestRig::test_setup_after_fulu() else { - return; - }; - let _supernode = r.new_connected_supernode_peer(); - let (block, data_columns) = r.rand_block_and_data_columns(); - let block_root = block.canonical_root(); - r.trigger_sample_block(block_root, block.slot()); - - // Retrieve the sample request, which should be batched. - let (sync_request_id, column_indexes) = r - .expect_only_data_columns_by_root_requests(block_root, 1) - .pop() - .unwrap(); - assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); - - // The request status should be set to Sampling. - r.assert_sampling_request_ongoing(block_root, &column_indexes); - - // Split the indexes to simulate the case where the supernode doesn't have the requested column. - let (column_indexes_supernode_does_not_have, column_indexes_to_complete) = - column_indexes.split_at(1); - - // Complete the requests but only partially, so a NotEnoughResponsesReturned error occurs. - let data_columns_to_complete = data_columns - .iter() - .filter(|d| column_indexes_to_complete.contains(&d.index)) - .cloned() - .collect::>(); - r.complete_data_columns_by_root_request( - (sync_request_id, column_indexes.clone()), - &data_columns_to_complete, - ); - - // The request status should be set to NoPeers since the supernode, the only peer, returned not enough responses. 
- r.log_sampling_requests(block_root, &column_indexes); - r.assert_sampling_request_nopeers(block_root, column_indexes_supernode_does_not_have); - - // The sampling request stalls. - r.expect_empty_network(); - r.expect_no_work_event(); - r.expect_active_sampling(&block_root); -} - #[test] fn custody_lookup_happy_path() { let Some(mut r) = TestRig::test_setup_after_fulu() else { @@ -2233,9 +1933,6 @@ fn custody_lookup_happy_path() { // - Respond with stream terminator // ^ The stream terminator should be ignored and not close the next retry -// TODO(das): Test error early a sampling request and it getting drop + then receiving responses -// from pending requests. - mod deneb_only { use super::*; use beacon_chain::{ diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index eb27a03552..e58320010f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -78,15 +78,6 @@ pub fn cli_app() -> Command { .hide(true) .display_order(0) ) - .arg( - Arg::new("enable-sampling") - .long("enable-sampling") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .help("Enable peer sampling on data columns. Disabled by default.") - .hide(true) - .display_order(0) - ) .arg( Arg::new("blob-publication-batches") .long("blob-publication-batches") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f55b91d58c..1cf56ae043 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -192,10 +192,6 @@ pub fn get_config( client_config.chain.shuffling_cache_size = cache_size; } - if cli_args.get_flag("enable-sampling") { - client_config.chain.enable_sampling = true; - } - if let Some(batches) = clap_utils::parse_optional(cli_args, "blob-publication-batches")? 
{ client_config.chain.blob_publication_batches = batches; } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 884e5eddeb..e064096aec 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -799,13 +799,6 @@ fn network_subscribe_all_data_column_subnets_flag() { .with_config(|config| assert!(config.network.subscribe_all_data_column_subnets)); } #[test] -fn network_enable_sampling_flag() { - CommandLineTest::new() - .flag("enable-sampling", None) - .run_with_zero_port() - .with_config(|config| assert!(config.chain.enable_sampling)); -} -#[test] fn blob_publication_batches() { CommandLineTest::new() .flag("blob-publication-batches", Some("3")) @@ -826,12 +819,6 @@ fn blob_publication_batch_interval() { }); } -#[test] -fn network_enable_sampling_flag_default() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| assert!(!config.chain.enable_sampling)); -} #[test] fn network_subscribe_all_subnets_flag() { CommandLineTest::new() From 9911f348bc17c3713e400ad2e8257ce49b3f4150 Mon Sep 17 00:00:00 2001 From: Eric Tu <6364934+ec2@users.noreply.github.com> Date: Wed, 23 Jul 2025 12:55:02 -0400 Subject: [PATCH 44/44] Feature gate arbitrary crate in the consensus types crate (#7743) Which issue # does this PR address? Puts the `arbitrary` crate behind a feature flag in the `types` crate. 
--- Cargo.lock | 4 +- Cargo.toml | 2 +- Dockerfile | 2 +- consensus/types/Cargo.toml | 24 +++++---- consensus/types/src/activation_queue.rs | 3 +- consensus/types/src/aggregate_and_proof.rs | 17 +++--- consensus/types/src/attestation.rs | 25 ++++----- consensus/types/src/attestation_data.rs | 2 +- consensus/types/src/attestation_duty.rs | 3 +- consensus/types/src/attester_slashing.rs | 15 ++++-- consensus/types/src/beacon_block.rs | 15 ++++-- consensus/types/src/beacon_block_body.rs | 17 ++++-- consensus/types/src/beacon_block_header.rs | 14 +---- consensus/types/src/beacon_committee.rs | 3 +- consensus/types/src/beacon_state.rs | 18 +++++-- consensus/types/src/beacon_state/balance.rs | 4 +- .../types/src/beacon_state/committee_cache.rs | 1 + .../types/src/beacon_state/exit_cache.rs | 1 + .../progressive_balances_cache.rs | 10 ++-- .../types/src/beacon_state/pubkey_cache.rs | 1 + .../types/src/beacon_state/slashings_cache.rs | 6 ++- consensus/types/src/blob_sidecar.rs | 17 +++--- .../types/src/bls_to_execution_change.rs | 14 +---- consensus/types/src/chain_spec.rs | 9 ++-- consensus/types/src/checkpoint.rs | 2 +- consensus/types/src/consolidation_request.rs | 14 +---- consensus/types/src/contribution_and_proof.rs | 18 +++---- consensus/types/src/data_column_sidecar.rs | 17 +++--- consensus/types/src/data_column_subnet_id.rs | 3 +- consensus/types/src/deposit.rs | 13 +---- consensus/types/src/deposit_data.rs | 13 +---- consensus/types/src/deposit_message.rs | 14 +---- consensus/types/src/deposit_request.rs | 13 +---- consensus/types/src/enr_fork_id.rs | 13 +---- consensus/types/src/epoch_cache.rs | 9 ++-- consensus/types/src/eth1_data.rs | 2 +- consensus/types/src/eth_spec.rs | 13 ++--- consensus/types/src/execution_block_hash.rs | 14 +---- consensus/types/src/execution_payload.rs | 15 ++++-- .../types/src/execution_payload_header.rs | 15 ++++-- consensus/types/src/execution_requests.rs | 18 +++---- consensus/types/src/fork.rs | 2 +- consensus/types/src/fork_data.rs 
| 13 +---- consensus/types/src/graffiti.rs | 2 +- consensus/types/src/historical_batch.rs | 17 ++---- consensus/types/src/historical_summary.rs | 2 +- consensus/types/src/indexed_attestation.rs | 23 ++++---- consensus/types/src/light_client_bootstrap.rs | 15 ++++-- .../types/src/light_client_finality_update.rs | 15 ++++-- consensus/types/src/light_client_header.rs | 17 ++++-- .../src/light_client_optimistic_update.rs | 15 ++++-- consensus/types/src/light_client_update.rs | 15 ++++-- consensus/types/src/participation_flags.rs | 2 +- consensus/types/src/payload.rs | 52 +++++++++++++++---- consensus/types/src/pending_attestation.rs | 17 ++---- consensus/types/src/pending_consolidation.rs | 14 +---- consensus/types/src/pending_deposit.rs | 13 +---- .../types/src/pending_partial_withdrawal.rs | 14 +---- consensus/types/src/proposer_slashing.rs | 14 +---- consensus/types/src/relative_epoch.rs | 3 +- consensus/types/src/selection_proof.rs | 3 +- .../types/src/signed_aggregate_and_proof.rs | 15 ++++-- consensus/types/src/signed_beacon_block.rs | 18 ++++--- .../types/src/signed_beacon_block_header.rs | 14 +---- .../src/signed_bls_to_execution_change.rs | 14 +---- .../src/signed_contribution_and_proof.rs | 17 ++---- consensus/types/src/signed_voluntary_exit.rs | 13 +---- consensus/types/src/signing_data.rs | 14 +---- consensus/types/src/slot_epoch.rs | 30 ++--------- consensus/types/src/subnet_id.rs | 3 +- consensus/types/src/sync_aggregate.rs | 18 +++---- .../src/sync_aggregator_selection_data.rs | 13 +---- consensus/types/src/sync_committee.rs | 17 ++---- .../types/src/sync_committee_contribution.rs | 17 ++---- consensus/types/src/sync_committee_message.rs | 14 +---- consensus/types/src/sync_selection_proof.rs | 3 +- consensus/types/src/sync_subnet_id.rs | 3 +- consensus/types/src/validator.rs | 13 +---- consensus/types/src/voluntary_exit.rs | 13 +---- consensus/types/src/withdrawal.rs | 14 +---- consensus/types/src/withdrawal_request.rs | 14 +---- lighthouse/Cargo.toml | 
2 +- 82 files changed, 404 insertions(+), 576 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index af5d63e97f..3786392feb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6016,9 +6016,9 @@ dependencies = [ [[package]] name = "milhouse" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdc758ed0c2597254f45baa97c8aa35f44ae0c8b04ddc355f135ced531f316d6" +checksum = "2bdb104e38d3a8c5ffb7e9d2c43c522e6bcc34070edbadba565e722f0dee56c7" dependencies = [ "alloy-primitives", "arbitrary", diff --git a/Cargo.toml b/Cargo.toml index 6737ff22c5..ea64f5ae98 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -184,7 +184,7 @@ malloc_utils = { path = "common/malloc_utils" } maplit = "1" merkle_proof = { path = "consensus/merkle_proof" } metrics = { path = "common/metrics" } -milhouse = "0.6" +milhouse = { version = "0.7", default-features = false } mockall = "0.13" mockall_double = "0.3" mockito = "1.5.0" diff --git a/Dockerfile b/Dockerfile index 437c864c30..f925836e48 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.84.0-bullseye AS builder +FROM rust:1.88.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index ec6835defc..50ca1835d0 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -9,18 +9,22 @@ default = ["sqlite", "legacy-arith"] # Allow saturating arithmetic on slots and epochs. Enabled by default, but deprecated. legacy-arith = [] sqlite = ["dep:rusqlite"] -# The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. -# For simplicity `Arbitrary` is now derived regardless of the feature's presence. 
-arbitrary-fuzz = [] +arbitrary = [ + "dep:arbitrary", + "bls/arbitrary", + "ethereum_ssz/arbitrary", + "milhouse/arbitrary", + "ssz_types/arbitrary", + "swap_or_not_shuffle/arbitrary", +] +arbitrary-fuzz = ["arbitrary"] portable = ["bls/supranational-portable"] [dependencies] alloy-primitives = { workspace = true } alloy-rlp = { version = "0.3.4", features = ["derive"] } -# The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by -# `AbstractExecPayload` -arbitrary = { workspace = true, features = ["derive"] } -bls = { workspace = true, features = ["arbitrary"] } +arbitrary = { workspace = true, features = ["derive"], optional = true } +bls = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } context_deserialize = { workspace = true } @@ -29,7 +33,7 @@ derivative = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } -ethereum_ssz = { workspace = true, features = ["arbitrary"] } +ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } fixed_bytes = { workspace = true } hex = { workspace = true } @@ -52,9 +56,9 @@ serde = { workspace = true, features = ["rc"] } serde_json = { workspace = true } serde_yaml = { workspace = true } smallvec = { workspace = true } -ssz_types = { workspace = true, features = ["arbitrary"] } +ssz_types = { workspace = true } superstruct = { workspace = true } -swap_or_not_shuffle = { workspace = true, features = ["arbitrary"] } +swap_or_not_shuffle = { workspace = true } tempfile = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } tracing = { workspace = true } diff --git a/consensus/types/src/activation_queue.rs b/consensus/types/src/activation_queue.rs index 09ffa5b85e..dd3ce5f88c 100644 --- a/consensus/types/src/activation_queue.rs +++ 
b/consensus/types/src/activation_queue.rs @@ -2,7 +2,8 @@ use crate::{ChainSpec, Epoch, Validator}; use std::collections::BTreeSet; /// Activation queue computed during epoch processing for use in the *next* epoch. -#[derive(Debug, PartialEq, Eq, Default, Clone, arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Eq, Default, Clone)] pub struct ActivationQueue { /// Validators represented by `(activation_eligibility_epoch, index)` in sorted order. /// diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index a280afeaae..374fd3f0ff 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -16,7 +16,6 @@ use tree_hash_derive::TreeHash; variants(Base, Electra), variant_attributes( derive( - arbitrary::Arbitrary, Debug, Clone, PartialEq, @@ -29,23 +28,29 @@ use tree_hash_derive::TreeHash; ), context_deserialize(ForkName), serde(bound = "E: EthSpec"), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), ), ref_attributes( - derive(Debug, PartialEq, TreeHash, Serialize,), + derive(Debug, PartialEq, TreeHash, Serialize), serde(untagged, bound = "E: EthSpec"), tree_hash(enum_behaviour = "transparent") ), map_ref_into(AttestationRef) )] -#[derive( - arbitrary::Arbitrary, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, TreeHash, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, TreeHash)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -#[arbitrary(bound = "E: EthSpec")] pub struct AggregateAndProof { /// The index of the validator that created the attestation. 
#[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index de0e86489d..85d442bff1 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -46,34 +46,31 @@ impl From for Error { Encode, TestRandom, Derivative, - arbitrary::Arbitrary, TreeHash, ), context_deserialize(ForkName), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") + ) ), ref_attributes(derive(TreeHash), tree_hash(enum_behaviour = "transparent")), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive( - Debug, - Clone, - Serialize, - TreeHash, - Encode, - Derivative, - Deserialize, - arbitrary::Arbitrary, - PartialEq, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, Serialize, TreeHash, Encode, Derivative, Deserialize, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -#[arbitrary(bound = "E: EthSpec")] pub struct Attestation { #[superstruct(only(Base), partial_getter(rename = "aggregation_bits_base"))] pub aggregation_bits: BitList, @@ -601,6 +598,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for Vec> } */ +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, Clone, @@ -610,7 +608,6 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for Vec> Encode, TestRandom, Derivative, - arbitrary::Arbitrary, TreeHash, PartialEq, )] diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index d0d4dcc553..26ca5f1aec 100644 --- 
a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -9,8 +9,8 @@ use tree_hash_derive::TreeHash; /// The data upon which an attestation is based. /// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, Debug, Clone, PartialEq, diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index 22b03dda61..70c7c5c170 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -1,7 +1,8 @@ use crate::*; use serde::{Deserialize, Serialize}; -#[derive(arbitrary::Arbitrary, Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { /// The slot during which the attester must attest. pub slot: Slot, diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index f671a43c9c..82611b6c7b 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -25,21 +25,26 @@ use tree_hash_derive::TreeHash; Decode, TreeHash, TestRandom, - arbitrary::Arbitrary ), context_deserialize(ForkName), derivative(PartialEq, Eq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec"), - arbitrary(bound = "E: EthSpec") + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") + ), ), ref_attributes(derive(Debug)) )] -#[derive( - Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative)] #[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] #[serde(bound = "E: EthSpec", untagged)] -#[arbitrary(bound = "E: 
EthSpec")] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] pub struct AttesterSlashing { diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 9168a3feee..6a2bb88d04 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -28,14 +28,17 @@ use self::indexed_attestation::IndexedAttestationBase; TreeHash, TestRandom, Derivative, - arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "E: EthSpec, Payload: AbstractExecPayload")), serde( bound = "E: EthSpec, Payload: AbstractExecPayload", deny_unknown_fields ), - arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") + ) ), ref_attributes( derive(Debug, PartialEq, TreeHash), @@ -44,13 +47,15 @@ use self::indexed_attestation::IndexedAttestationBase; map_ref_into(BeaconBlockBodyRef, BeaconBlock), map_ref_mut_into(BeaconBlockBodyRefMut) )] -#[derive( - Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") )] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] #[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] -#[arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] pub struct BeaconBlock = FullPayload> { diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 4440c5cf25..dca9aa14c3 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -40,14 +40,17 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; 
TreeHash, TestRandom, Derivative, - arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "E: EthSpec, Payload: AbstractExecPayload")), serde( bound = "E: EthSpec, Payload: AbstractExecPayload", deny_unknown_fields ), - arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), + ), context_deserialize(ForkName), ), specific_variant_attributes( @@ -62,12 +65,16 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, Derivative, TreeHash, arbitrary::Arbitrary)] +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") +)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative, TreeHash)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] #[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] -#[arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload")] pub struct BeaconBlockBody = FullPayload> { pub randao_reveal: Signature, pub eth1_data: Eth1Data, @@ -128,7 +135,7 @@ pub struct BeaconBlockBody = FullPay #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[serde(skip)] - #[arbitrary(default)] + #[cfg_attr(feature = "arbitrary", arbitrary(default))] pub _phantom: PhantomData, } diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index 8416f975db..7cdbd2eee1 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -11,19 +11,9 @@ use tree_hash_derive::TreeHash; /// A header of a `BeaconBlock`. 
/// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct BeaconBlockHeader { diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/beacon_committee.rs index bdb91cd6e6..04fe763a11 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/beacon_committee.rs @@ -17,7 +17,8 @@ impl BeaconCommittee<'_> { } } -#[derive(arbitrary::Arbitrary, Default, Clone, Debug, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Default, Clone, Debug, PartialEq)] pub struct OwnedBeaconCommittee { pub slot: Slot, pub index: CommitteeIndex, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 31bc949583..bddfb6445a 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -191,7 +191,8 @@ impl AllowNextEpoch { } } -#[derive(PartialEq, Eq, Hash, Clone, Copy, arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(PartialEq, Eq, Hash, Clone, Copy)] pub struct BeaconStateHash(Hash256); impl fmt::Debug for BeaconStateHash { @@ -240,10 +241,13 @@ impl From for Hash256 { TreeHash, TestRandom, CompareFields, - arbitrary::Arbitrary, ), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") + ), derivative(Clone), ), specific_variant_attributes( @@ -350,10 +354,14 @@ impl From for Hash256 { partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant"), map_ref_mut_into(BeaconStateRef) )] -#[derive(Debug, PartialEq, Clone, Serialize, 
Deserialize, Encode, arbitrary::Arbitrary)] +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode)] #[serde(untagged)] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[ssz(enum_behaviour = "transparent")] pub struct BeaconState where diff --git a/consensus/types/src/beacon_state/balance.rs b/consensus/types/src/beacon_state/balance.rs index e537a5b984..cd449bdb82 100644 --- a/consensus/types/src/beacon_state/balance.rs +++ b/consensus/types/src/beacon_state/balance.rs @@ -1,10 +1,12 @@ +#[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use safe_arith::{ArithError, SafeArith}; /// A balance which will never be below the specified `minimum`. /// /// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected. -#[derive(PartialEq, Debug, Clone, Copy, Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +#[derive(PartialEq, Debug, Clone, Copy)] pub struct Balance { raw: u64, minimum: u64, diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index e3fb339c87..513e538526 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -374,6 +374,7 @@ where active } +#[cfg(feature = "arbitrary")] impl arbitrary::Arbitrary<'_> for CommitteeCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index 0bb984b667..2828a6138c 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -86,6 +86,7 @@ impl ExitCache { } } +#[cfg(feature = "arbitrary")] impl arbitrary::Arbitrary<'_> for ExitCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> 
arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs index 8e8a1a6aa9..019bf1c3d3 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -6,6 +6,7 @@ use crate::{ }, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, }; +#[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use safe_arith::SafeArith; @@ -13,12 +14,14 @@ use safe_arith::SafeArith; /// epochs. The cached values can be utilised by fork choice to calculate unrealized justification /// and finalization instead of converting epoch participation arrays to balances for each block we /// process. -#[derive(Default, Debug, PartialEq, Arbitrary, Clone)] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +#[derive(Default, Debug, PartialEq, Clone)] pub struct ProgressiveBalancesCache { inner: Option, } -#[derive(Debug, PartialEq, Arbitrary, Clone)] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +#[derive(Debug, PartialEq, Clone)] struct Inner { pub current_epoch: Epoch, pub previous_epoch_cache: EpochTotalBalances, @@ -26,7 +29,8 @@ struct Inner { } /// Caches the participation values for one epoch (either the previous or current). -#[derive(PartialEq, Debug, Clone, Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +#[derive(PartialEq, Debug, Clone)] pub struct EpochTotalBalances { /// Stores the sum of the balances for all validators in `self.unslashed_participating_indices` /// for all flags in `NUM_FLAG_INDICES`. 
diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/beacon_state/pubkey_cache.rs index d58dd7bc1d..85ed00340d 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/beacon_state/pubkey_cache.rs @@ -43,6 +43,7 @@ impl PubkeyCache { } } +#[cfg(feature = "arbitrary")] impl arbitrary::Arbitrary<'_> for PubkeyCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/slashings_cache.rs b/consensus/types/src/beacon_state/slashings_cache.rs index 45d8f7e212..6530f795e9 100644 --- a/consensus/types/src/beacon_state/slashings_cache.rs +++ b/consensus/types/src/beacon_state/slashings_cache.rs @@ -1,12 +1,14 @@ use crate::{BeaconStateError, Slot, Validator}; +#[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use rpds::HashTrieSetSync as HashTrieSet; /// Persistent (cheap to clone) cache of all slashed validator indices. -#[derive(Debug, Default, Clone, PartialEq, Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +#[derive(Debug, Default, Clone, PartialEq)] pub struct SlashingsCache { latest_block_slot: Option, - #[arbitrary(default)] + #[cfg_attr(feature = "arbitrary", arbitrary(default))] slashed_validators: HashTrieSet, } diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index f7a5725c5a..dbe4360901 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -44,21 +44,16 @@ impl Ord for BlobIdentifier { } } +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] #[derive( - Debug, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - Derivative, - arbitrary::Arbitrary, + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, )] #[context_deserialize(ForkName)] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: 
EthSpec")] #[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] pub struct BlobSidecar { #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index b333862220..72d737ac71 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -5,19 +5,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct BlsToExecutionChange { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 49537073b4..b6dafaeb00 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -33,7 +33,8 @@ pub enum Domain { /// Lighthouse's internal configuration struct. /// /// Contains a mixture of "preset" and "config" values w.r.t to the EF definitions. -#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(PartialEq, Debug, Clone)] pub struct ChainSpec { /* * Config name @@ -1459,7 +1460,8 @@ impl Default for ChainSpec { } } -#[derive(arbitrary::Arbitrary, Serialize, Deserialize, Debug, PartialEq, Clone)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] #[serde(rename_all = "UPPERCASE")] pub struct BlobParameters { pub epoch: Epoch, @@ -1469,7 +1471,8 @@ pub struct BlobParameters { // A wrapper around a vector of BlobParameters to ensure that the vector is reverse // sorted by epoch. 
-#[derive(arbitrary::Arbitrary, Debug, PartialEq, Clone)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Clone)] pub struct BlobSchedule(Vec); impl<'de> Deserialize<'de> for BlobSchedule { diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/checkpoint.rs index c3cb1d5c36..2bb1df51c0 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/checkpoint.rs @@ -9,8 +9,8 @@ use tree_hash_derive::TreeHash; /// Casper FFG checkpoint, used in attestations. /// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, Debug, Clone, Copy, diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation_request.rs index c7375dab84..87098beaee 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation_request.rs @@ -6,19 +6,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct ConsolidationRequest { diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index e918beacb0..85c9ac15fb 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -10,20 +10,14 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// A Validators aggregate sync committee contribution and selection proof. 
-#[derive( - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TestRandom, - TreeHash, - arbitrary::Arbitrary, + +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[context_deserialize(ForkName)] pub struct ContributionAndProof { /// The index of the validator that created the sync contribution. diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 612ddb6eb8..1401956331 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -95,20 +95,15 @@ impl RuntimeVariableList { pub type DataColumnSidecarList = Vec>>; +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] #[derive( - Debug, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - Derivative, - arbitrary::Arbitrary, + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, )] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] #[context_deserialize(ForkName)] pub struct DataColumnSidecar { diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data_column_subnet_id.rs index 5b3eef24cc..3c3a1310e4 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ b/consensus/types/src/data_column_subnet_id.rs @@ -6,7 +6,8 @@ use serde::{Deserialize, Serialize}; use std::fmt::{self, Display}; use std::ops::{Deref, DerefMut}; -#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] 
#[serde(transparent)] pub struct DataColumnSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs index 8b4b6af95d..724f3de2f0 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -12,18 +12,9 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32; /// A deposit to potentially become a beacon chain validator. /// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct Deposit { diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index d29e8c8d14..3d9ae12808 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -8,18 +8,9 @@ use tree_hash_derive::TreeHash; /// The data supplied by the user to the deposit contract. /// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct DepositData { diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index 5c2a0b7c2b..9fe3b87885 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -9,18 +9,8 @@ use tree_hash_derive::TreeHash; /// The data supplied by the user to the deposit contract. 
/// /// Spec v0.12.1 -#[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, -)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[context_deserialize(ForkName)] pub struct DepositMessage { pub pubkey: PublicKeyBytes, diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit_request.rs index 141258b5ab..16acfb3b44 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit_request.rs @@ -8,18 +8,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct DepositRequest { diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index e3742cb96c..40718380a5 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -10,18 +10,9 @@ use tree_hash_derive::TreeHash; /// a nodes local ENR. /// /// Spec v0.11 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - Clone, - PartialEq, - Default, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct EnrForkId { /// Fork digest of the current fork computed from [`ChainSpec::compute_fork_digest`]. 
diff --git a/consensus/types/src/epoch_cache.rs b/consensus/types/src/epoch_cache.rs index b447e9b71e..ef91c20d75 100644 --- a/consensus/types/src/epoch_cache.rs +++ b/consensus/types/src/epoch_cache.rs @@ -8,12 +8,14 @@ use std::sync::Arc; /// to as the "decision block". This cache is very similar to the `BeaconProposerCache` in that /// beacon proposers are determined at exactly the same time as the values in this cache, so /// the keys for the two caches are identical. -#[derive(Debug, PartialEq, Eq, Clone, Default, arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Eq, Clone, Default)] pub struct EpochCache { inner: Option>, } -#[derive(Debug, PartialEq, Eq, Clone, arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Eq, Clone)] struct Inner { /// Unique identifier for this cache, which can be used to check its validity before use /// with any `BeaconState`. @@ -30,7 +32,8 @@ struct Inner { effective_balance_increment: u64, } -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub struct EpochCacheKey { pub epoch: Epoch, pub decision_block_root: Hash256, diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index 7bd0d3228d..42de3ed806 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -10,8 +10,8 @@ use tree_hash_derive::TreeHash; /// Contains data obtained from the Eth1 chain. 
/// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, Debug, PartialEq, Clone, diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index bc87a4bd80..40006caf1e 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -49,9 +49,7 @@ impl fmt::Display for EthSpecId { } } -pub trait EthSpec: - 'static + Default + Sync + Send + Clone + Debug + PartialEq + Eq + for<'a> arbitrary::Arbitrary<'a> -{ +pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Eq { /* * Constants */ @@ -394,7 +392,8 @@ macro_rules! params_from_eth_spec { } /// Ethereum Foundation specifications. -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] pub struct MainnetEthSpec; impl EthSpec for MainnetEthSpec { @@ -460,7 +459,8 @@ impl EthSpec for MainnetEthSpec { } /// Ethereum Foundation minimal spec, as defined in the eth2.0-specs repo. -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] pub struct MinimalEthSpec; impl EthSpec for MinimalEthSpec { @@ -529,7 +529,8 @@ impl EthSpec for MinimalEthSpec { } /// Gnosis Beacon Chain specifications. 
-#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] pub struct GnosisEthSpec; impl EthSpec for GnosisEthSpec { diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index 6c031f6899..c1223a64f0 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -7,18 +7,8 @@ use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; -#[derive( - arbitrary::Arbitrary, - Default, - Clone, - Copy, - Serialize, - Deserialize, - Eq, - PartialEq, - Hash, - Derivative, -)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] #[derivative(Debug = "transparent")] #[serde(transparent)] pub struct ExecutionBlockHash(#[serde(with = "serde_utils::b256_hex")] pub Hash256); diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index b4b0608150..17e3a49496 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -28,24 +28,29 @@ pub type Withdrawals = VariableList::MaxWithdrawal TreeHash, TestRandom, Derivative, - arbitrary::Arbitrary ), context_deserialize(ForkName), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec") + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), ), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), map_into(FullPayload, BlindedPayload), map_ref_into(ExecutionPayloadHeader) )] -#[derive( - Debug, Clone, Serialize, 
Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(bound = "E: EthSpec", untagged)] -#[arbitrary(bound = "E: EthSpec")] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] pub struct ExecutionPayload { diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index a16f29819d..9abc6e9e32 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -21,11 +21,14 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, Derivative, - arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), context_deserialize(ForkName), ), ref_attributes( @@ -36,12 +39,14 @@ use tree_hash_derive::TreeHash; partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), map_ref_into(ExecutionPayloadHeader) )] -#[derive( - Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(bound = "E: EthSpec", untagged)] -#[arbitrary(bound = "E: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] pub struct ExecutionPayloadHeader { diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs index 
2fec3b5f66..592dda5d5e 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution_requests.rs @@ -18,21 +18,15 @@ pub type WithdrawalRequests = pub type ConsolidationRequests = VariableList::MaxConsolidationRequestsPerPayload>; +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] #[derive( - arbitrary::Arbitrary, - Debug, - Derivative, - Default, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, Derivative, Default, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] #[context_deserialize(ForkName)] pub struct ExecutionRequests { diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 239ffe33c0..19a137b108 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -10,8 +10,8 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, Debug, Clone, Copy, diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index 1ac91084d2..7a4c07528a 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -10,18 +10,9 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. 
/// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - Clone, - PartialEq, - Default, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct ForkData { diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index f781aacabd..ae9fff5092 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -12,9 +12,9 @@ use tree_hash::{PackedEncoding, TreeHash}; pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] -#[derive(arbitrary::Arbitrary)] pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]); impl Graffiti { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index 3a02810bba..55377f2489 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -9,19 +9,12 @@ use tree_hash_derive::TreeHash; /// Historical block and state roots. 
/// /// Spec v0.12.1 -#[derive( - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] -#[arbitrary(bound = "E: EthSpec")] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[context_deserialize(ForkName)] pub struct HistoricalBatch { #[test_random(default)] diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs index 7ad423dade..0aad2d903d 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/historical_summary.rs @@ -13,6 +13,7 @@ use tree_hash_derive::TreeHash; /// in the Capella hard fork. /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#historicalsummary +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, @@ -27,7 +28,6 @@ use tree_hash_derive::TreeHash; Clone, Copy, Default, - arbitrary::Arbitrary, )] #[context_deserialize(ForkName)] pub struct HistoricalSummary { diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index ea65d78504..4526b165c8 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -29,31 +29,28 @@ use tree_hash_derive::TreeHash; Encode, TestRandom, Derivative, - arbitrary::Arbitrary, TreeHash, ), context_deserialize(ForkName), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), ) )] -#[derive( - Debug, - Clone, - Serialize, - TreeHash, - Encode, - Derivative, - Deserialize, - arbitrary::Arbitrary, - PartialEq, +#[cfg_attr( + feature = "arbitrary", + 
derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, Serialize, TreeHash, Encode, Derivative, Deserialize, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -#[arbitrary(bound = "E: EthSpec")] pub struct IndexedAttestation { /// Lists validator registry indices, not committee indices. #[superstruct(only(Base), partial_getter(rename = "attesting_indices_base"))] diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index e82b34cc8c..7e170365b2 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -29,22 +29,27 @@ use tree_hash_derive::TreeHash; Decode, Encode, TestRandom, - arbitrary::Arbitrary, TreeHash, ), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), context_deserialize(ForkName), ) )] -#[derive( - Debug, Clone, Serialize, TreeHash, Encode, Deserialize, arbitrary::Arbitrary, PartialEq, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, Serialize, TreeHash, Encode, Deserialize, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -#[arbitrary(bound = "E: EthSpec")] pub struct LightClientBootstrap { /// The requested beacon block header. 
#[superstruct(only(Altair), partial_getter(rename = "header_altair"))] diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 2125b4668b..0f572a856f 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -28,20 +28,27 @@ use tree_hash_derive::TreeHash; Decode, Encode, TestRandom, - arbitrary::Arbitrary, TreeHash, ), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), context_deserialize(ForkName), ) )] -#[derive(Debug, Clone, Serialize, Encode, TreeHash, arbitrary::Arbitrary, PartialEq)] +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] +#[derive(Debug, Clone, Serialize, Encode, TreeHash, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -#[arbitrary(bound = "E: EthSpec")] pub struct LightClientFinalityUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. 
#[superstruct(only(Altair), partial_getter(rename = "attested_header_altair"))] diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 36f2932ecd..c36a1c2111 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -30,20 +30,27 @@ use tree_hash_derive::TreeHash; Decode, Encode, TestRandom, - arbitrary::Arbitrary, TreeHash, ), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), context_deserialize(ForkName), ) )] -#[derive(Debug, Clone, Serialize, TreeHash, Encode, arbitrary::Arbitrary, PartialEq)] +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] +#[derive(Debug, Clone, Serialize, TreeHash, Encode, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -#[arbitrary(bound = "E: EthSpec")] pub struct LightClientHeader { pub beacon: BeaconBlockHeader, @@ -68,7 +75,7 @@ pub struct LightClientHeader { #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[serde(skip)] - #[arbitrary(default)] + #[cfg_attr(feature = "arbitrary", arbitrary(default))] pub _phantom_data: PhantomData, } diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 13e308cd27..1bff0df061 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -31,20 +31,27 @@ use tree_hash_derive::TreeHash; Decode, Encode, TestRandom, - arbitrary::Arbitrary, TreeHash, ), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: 
EthSpec"), + ), context_deserialize(ForkName), ) )] -#[derive(Debug, Clone, Serialize, Encode, TreeHash, arbitrary::Arbitrary, PartialEq)] +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] +#[derive(Debug, Clone, Serialize, Encode, TreeHash, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -#[arbitrary(bound = "E: EthSpec")] pub struct LightClientOptimisticUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. #[superstruct(only(Altair), partial_getter(rename = "attested_header_altair"))] diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 92aeeb33bb..87976dbedb 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -112,20 +112,27 @@ impl From for Error { Decode, Encode, TestRandom, - arbitrary::Arbitrary, TreeHash, ), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), context_deserialize(ForkName), ) )] -#[derive(Debug, Clone, Serialize, Encode, TreeHash, arbitrary::Arbitrary, PartialEq)] +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] +#[derive(Debug, Clone, Serialize, Encode, TreeHash, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -#[arbitrary(bound = "E: EthSpec")] pub struct LightClientUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. 
#[superstruct(only(Altair), partial_getter(rename = "attested_header_altair"))] diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index e94e56f0cd..3e29ca83e8 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -7,7 +7,7 @@ use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; #[derive(Debug, Default, Clone, Copy, PartialEq, Deserialize, Serialize, TestRandom)] #[serde(transparent)] -#[derive(arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct ParticipationFlags { #[serde(with = "serde_utils::quoted_u8")] bits: u8, diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index c0262a2cf8..1f7edfcaca 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -49,6 +49,7 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + } /// `ExecPayload` functionality the requires ownership. +#[cfg(feature = "arbitrary")] pub trait OwnedExecPayload: ExecPayload + Default @@ -61,7 +62,7 @@ pub trait OwnedExecPayload: + 'static { } - +#[cfg(feature = "arbitrary")] impl OwnedExecPayload for P where P: ExecPayload + Default @@ -75,6 +76,25 @@ impl OwnedExecPayload for P where { } +/// `ExecPayload` functionality the requires ownership. 
+#[cfg(not(feature = "arbitrary"))] +pub trait OwnedExecPayload: + ExecPayload + Default + Serialize + DeserializeOwned + Encode + Decode + TestRandom + 'static +{ +} +#[cfg(not(feature = "arbitrary"))] +impl OwnedExecPayload for P where + P: ExecPayload + + Default + + Serialize + + DeserializeOwned + + Encode + + Decode + + TestRandom + + 'static +{ +} + pub trait AbstractExecPayload: ExecPayload + Sized @@ -135,11 +155,14 @@ pub trait AbstractExecPayload: TestRandom, TreeHash, Derivative, - arbitrary::Arbitrary, ), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), ssz(struct_behaviour = "transparent"), ), ref_attributes( @@ -152,10 +175,14 @@ pub trait AbstractExecPayload: cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary)] +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] pub struct FullPayload { #[superstruct( @@ -496,11 +523,14 @@ impl TryFrom> for FullPayload { TestRandom, TreeHash, Derivative, - arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), ssz(struct_behaviour = "transparent"), ), ref_attributes( @@ -512,10 +542,14 @@ impl TryFrom> for FullPayload { 
cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary)] +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] pub struct BlindedPayload { #[superstruct( diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index b7b4a19f4b..4a00a0495a 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -9,19 +9,12 @@ use tree_hash_derive::TreeHash; /// An attestation that has been included in the state but not yet fully processed. 
/// /// Spec v0.12.1 -#[derive( - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] -#[arbitrary(bound = "E: EthSpec")] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[context_deserialize(ForkName)] pub struct PendingAttestation { pub aggregation_bits: BitList, diff --git a/consensus/types/src/pending_consolidation.rs b/consensus/types/src/pending_consolidation.rs index 9a513f2744..4072c15564 100644 --- a/consensus/types/src/pending_consolidation.rs +++ b/consensus/types/src/pending_consolidation.rs @@ -6,19 +6,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct PendingConsolidation { diff --git a/consensus/types/src/pending_deposit.rs b/consensus/types/src/pending_deposit.rs index 970c326467..4a921edd54 100644 --- a/consensus/types/src/pending_deposit.rs +++ b/consensus/types/src/pending_deposit.rs @@ -5,18 +5,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct PendingDeposit { diff --git 
a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/pending_partial_withdrawal.rs index ca49032859..e9b10f79b5 100644 --- a/consensus/types/src/pending_partial_withdrawal.rs +++ b/consensus/types/src/pending_partial_withdrawal.rs @@ -6,19 +6,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct PendingPartialWithdrawal { diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index 7b03dbb83e..f4d914c1e5 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -10,19 +10,9 @@ use tree_hash_derive::TreeHash; /// Two conflicting proposals from the same proposer (validator). /// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct ProposerSlashing { diff --git a/consensus/types/src/relative_epoch.rs b/consensus/types/src/relative_epoch.rs index 77a46b56e8..2fa0ae41bd 100644 --- a/consensus/types/src/relative_epoch.rs +++ b/consensus/types/src/relative_epoch.rs @@ -18,7 +18,8 @@ impl From for Error { /// to and following some epoch. 
/// /// Spec v0.12.1 -#[derive(Debug, PartialEq, Clone, Copy, arbitrary::Arbitrary)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Clone, Copy)] pub enum RelativeEpoch { /// The prior epoch. Previous, diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs index c80a00c3d1..e471457c25 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/selection_proof.rs @@ -6,7 +6,8 @@ use safe_arith::{ArithError, SafeArith}; use ssz::Encode; use std::cmp; -#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(PartialEq, Debug, Clone)] pub struct SelectionProof(Signature); impl SelectionProof { diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index 7b1f97e521..758ac2734b 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -21,7 +21,6 @@ use tree_hash_derive::TreeHash; variants(Base, Electra), variant_attributes( derive( - arbitrary::Arbitrary, Debug, Clone, PartialEq, @@ -34,19 +33,25 @@ use tree_hash_derive::TreeHash; ), context_deserialize(ForkName), serde(bound = "E: EthSpec"), - arbitrary(bound = "E: EthSpec"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec"), + ), ), map_into(Attestation), map_ref_into(AggregateAndProofRef) )] -#[derive( - arbitrary::Arbitrary, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, TreeHash, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, TreeHash)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -#[arbitrary(bound = "E: EthSpec")] pub 
struct SignedAggregateAndProof { /// The `AggregateAndProof` that was signed. #[superstruct(flatten)] diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 64dce93aef..4a0a8c6ead 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -11,7 +11,8 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -#[derive(arbitrary::Arbitrary, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(PartialEq, Eq, Hash, Clone, Copy)] pub struct SignedBeaconBlockHash(Hash256); impl fmt::Debug for SignedBeaconBlockHash { @@ -51,24 +52,29 @@ impl From for Hash256 { Decode, TreeHash, Derivative, - arbitrary::Arbitrary, TestRandom ), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec, Payload: AbstractExecPayload"), - arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), + cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), + ), ), map_into(BeaconBlock), map_ref_into(BeaconBlockRef), map_ref_mut_into(BeaconBlockRefMut) )] -#[derive( - Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") )] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] #[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] -#[arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] pub struct SignedBeaconBlock = FullPayload> { diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs 
index 9106fa8372..77ca96b2a7 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -11,19 +11,9 @@ use tree_hash_derive::TreeHash; /// A signed header of a `BeaconBlock`. /// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - Clone, - PartialEq, - Eq, - Hash, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct SignedBeaconBlockHeader { diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index 383663e36b..910c4c7d7e 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -5,19 +5,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct SignedBlsToExecutionChange { diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/signed_contribution_and_proof.rs index 42115bfbc0..51c453d32f 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/signed_contribution_and_proof.rs @@ -11,20 +11,13 @@ use tree_hash_derive::TreeHash; /// A Validators signed contribution proof to publish on the `sync_committee_contribution_and_proof` /// gossipsub topic. 
-#[derive( - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TestRandom, - TreeHash, - arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[context_deserialize(ForkName)] pub struct SignedContributionAndProof { /// The `ContributionAndProof` that was signed. diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index b6451d3ab5..02213ed311 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -10,18 +10,9 @@ use tree_hash_derive::TreeHash; /// An exit voluntarily submitted a validator who wishes to withdraw. /// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct SignedVoluntaryExit { diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/signing_data.rs index aa25ecffd9..69b7dabfe5 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/signing_data.rs @@ -8,18 +8,8 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -#[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, -)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[context_deserialize(ForkName)] pub struct SigningData { pub object_root: 
Hash256, diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 66790a9641..857044f981 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -23,35 +23,13 @@ use std::hash::Hash; #[cfg(feature = "legacy-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; -#[derive( - arbitrary::Arbitrary, - Clone, - Copy, - Default, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, -)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct Slot(#[serde(with = "serde_utils::quoted_u64")] u64); -#[derive( - arbitrary::Arbitrary, - Clone, - Copy, - Default, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, -)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct Epoch(#[serde(with = "serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 7a5357c6cc..7289a817a3 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -22,7 +22,8 @@ static SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { v }); -#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 4f810db22a..7a4ef8f026 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -21,22 +21,16 @@ impl 
From for Error { Error::ArithError(e) } } - +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] #[derive( - Debug, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - Derivative, - arbitrary::Arbitrary, + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, )] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[context_deserialize(ForkName)] pub struct SyncAggregate { pub sync_committee_bits: BitVector, diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index a61cd47d04..a280369fea 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -6,18 +6,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Clone, - Serialize, - Deserialize, - Hash, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Clone, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct SyncAggregatorSelectionData { diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index c7ec7bdcc3..a9fde42554 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -25,20 +25,13 @@ impl From for Error { } } -#[derive( - Debug, - PartialEq, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] 
#[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[context_deserialize(ForkName)] pub struct SyncCommittee { pub pubkeys: FixedVector, diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index e2ac414cfa..bad7797e30 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -15,20 +15,13 @@ pub enum Error { } /// An aggregation of `SyncCommitteeMessage`s, used in creating a `SignedContributionAndProof`. -#[derive( - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - arbitrary::Arbitrary, +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") )] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[serde(bound = "E: EthSpec")] -#[arbitrary(bound = "E: EthSpec")] #[context_deserialize(ForkName)] pub struct SyncCommitteeContribution { pub slot: Slot, diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index 4b442b3053..d5bb7250bb 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -10,18 +10,8 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// The data upon which a `SyncCommitteeContribution` is based. 
-#[derive( - arbitrary::Arbitrary, - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, -)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[context_deserialize(ForkName)] pub struct SyncCommitteeMessage { pub slot: Slot, diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs index 4adb90b26e..6387212d94 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_selection_proof.rs @@ -11,7 +11,8 @@ use ssz::Encode; use ssz_types::typenum::Unsigned; use std::cmp; -#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(PartialEq, Debug, Clone)] pub struct SyncSelectionProof(Signature); impl SyncSelectionProof { diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 245ac5a6c4..1ce7d0c13f 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -18,7 +18,8 @@ static SYNC_SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { v }); -#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct SyncSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 165f477ff4..dc97c8821b 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -11,18 +11,9 @@ use tree_hash_derive::TreeHash; /// Information about a `BeaconChain` validator. 
/// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - Clone, - PartialEq, - Eq, - Serialize, - Deserialize, - Encode, - Decode, - TestRandom, - TreeHash, + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash, )] #[context_deserialize(ForkName)] pub struct Validator { diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 75260add4b..6090035038 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -12,18 +12,9 @@ use tree_hash_derive::TreeHash; /// An exit voluntarily submitted a validator who wishes to withdraw. /// /// Spec v0.12.1 +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct VoluntaryExit { diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 9ca50fccfb..ef4a1f285d 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -5,19 +5,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct Withdrawal { diff --git a/consensus/types/src/withdrawal_request.rs b/consensus/types/src/withdrawal_request.rs index 57c6e798eb..c08921a68c 100644 --- a/consensus/types/src/withdrawal_request.rs +++ 
b/consensus/types/src/withdrawal_request.rs @@ -7,19 +7,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( - arbitrary::Arbitrary, - Debug, - PartialEq, - Eq, - Hash, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[context_deserialize(ForkName)] pub struct WithdrawalRequest { diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index a6549f5574..ca7b24506f 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "7.1.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false -rust-version = "1.83.0" +rust-version = "1.87.0" # Prevent cargo-udeps from flagging the dummy package `target_check`, which exists only # to assert properties of the compilation target.