From e2acce94680041c34341fd27add39a3d3ba9e747 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 25 Mar 2025 16:53:10 +1100 Subject: [PATCH] Mallory - Single commit --- .github/ISSUE_TEMPLATE.md | 21 - .github/mergify.yml | 4 + .github/workflows/test-suite.yml | 3 + Cargo.lock | 2262 +++++++++-------- Cargo.toml | 16 +- beacon_node/Cargo.toml | 2 +- .../beacon_chain/src/beacon_block_streamer.rs | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 382 +-- .../beacon_chain/src/beacon_proposer_cache.rs | 101 +- .../beacon_chain/src/blob_verification.rs | 79 +- .../beacon_chain/src/block_verification.rs | 80 +- beacon_node/beacon_chain/src/builder.rs | 3 +- .../beacon_chain/src/canonical_head.rs | 64 +- beacon_node/beacon_chain/src/chain_config.rs | 3 + .../src/data_availability_checker.rs | 71 +- .../overflow_lru_cache.rs | 247 +- .../state_lru_cache.rs | 10 - .../src/data_column_verification.rs | 217 +- beacon_node/beacon_chain/src/errors.rs | 17 + .../beacon_chain/src/historical_blocks.rs | 2 +- beacon_node/beacon_chain/src/kzg_utils.rs | 39 +- beacon_node/beacon_chain/src/metrics.rs | 20 +- .../beacon_chain/src/state_advance_timer.rs | 36 +- beacon_node/beacon_chain/src/test_utils.rs | 2 +- .../beacon_chain/src/validator_custody.rs | 10 +- .../beacon_chain/src/validator_monitor.rs | 8 +- .../beacon_chain/tests/block_verification.rs | 38 +- beacon_node/beacon_chain/tests/store_tests.rs | 400 +++ .../beacon_chain/tests/validator_monitor.rs | 29 +- beacon_node/beacon_processor/src/lib.rs | 36 +- .../src/scheduler/work_reprocessing_queue.rs | 143 +- beacon_node/client/src/builder.rs | 2 +- .../src/compute_light_client_updates.rs | 4 +- beacon_node/execution_layer/src/lib.rs | 14 +- beacon_node/http_api/src/block_id.rs | 71 +- beacon_node/http_api/src/lib.rs | 55 +- beacon_node/http_api/src/proposer_duties.rs | 57 +- beacon_node/http_api/src/publish_blocks.rs | 2 +- beacon_node/http_api/tests/tests.rs | 147 +- beacon_node/lighthouse_network/src/config.rs | 6 +- 
.../lighthouse_network/src/discovery/mod.rs | 10 +- beacon_node/lighthouse_network/src/lib.rs | 14 +- .../lighthouse_network/src/mallory_config.rs | 37 + .../src/peer_manager/mod.rs | 69 +- .../src/peer_manager/peerdb/client.rs | 8 +- .../lighthouse_network/src/rpc/codec.rs | 35 +- .../lighthouse_network/src/rpc/handler.rs | 27 +- .../lighthouse_network/src/rpc/methods.rs | 48 +- beacon_node/lighthouse_network/src/rpc/mod.rs | 72 +- .../lighthouse_network/src/rpc/protocol.rs | 7 + .../src/rpc/rate_limiter.rs | 45 +- .../src/service/api_types.rs | 9 +- .../lighthouse_network/src/service/mallory.rs | 191 ++ .../lighthouse_network/src/service/mod.rs | 111 +- .../lighthouse_network/src/service/utils.rs | 4 +- .../lighthouse_network/src/types/topics.rs | 4 +- beacon_node/lighthouse_tracing/src/lib.rs | 5 + beacon_node/network/src/lib.rs | 2 +- .../gossip_methods.rs | 72 +- .../src/network_beacon_processor/mod.rs | 82 +- .../network_beacon_processor/rpc_methods.rs | 6 +- .../network_beacon_processor/sync_methods.rs | 226 +- .../src/network_beacon_processor/tests.rs | 38 +- beacon_node/network/src/router.rs | 1 + beacon_node/network/src/service.rs | 13 + .../src/subnet_service/attestation_subnets.rs | 681 ----- .../src/subnet_service/sync_subnets.rs | 345 --- .../network/src/sync/block_lookups/mod.rs | 35 +- .../sync/block_lookups/single_block_lookup.rs | 2 +- beacon_node/network/src/sync/manager.rs | 8 +- .../network/src/sync/network_context.rs | 32 +- beacon_node/network/src/sync/tests/lookups.rs | 90 +- beacon_node/operation_pool/src/lib.rs | 55 +- beacon_node/src/cli.rs | 78 +- beacon_node/src/config.rs | 39 +- beacon_node/store/src/config.rs | 4 +- beacon_node/store/src/hot_cold_store.rs | 215 +- book/src/advanced_database_migrations.md | 2 + book/src/help_bn.md | 11 +- boot_node/Cargo.toml | 2 +- common/eth2/src/lib.rs | 42 +- common/eth2/src/types.rs | 15 + .../holesky/config.yaml | 33 +- .../hoodi/config.yaml | 34 +- .../sepolia/config.yaml | 34 +- 
common/eth2_network_config/src/lib.rs | 2 +- common/lighthouse_version/src/lib.rs | 6 +- common/logging/src/tracing_logging_layer.rs | 114 +- common/network_utils/src/discovery_metrics.rs | 3 +- common/task_executor/Cargo.toml | 2 + common/task_executor/src/lib.rs | 50 +- .../task_executor/src/rayon_pool_provider.rs | 58 + .../src/proto_array_fork_choice.rs | 50 + consensus/safe_arith/Cargo.toml | 8 - consensus/safe_arith/src/iter.rs | 70 - consensus/safe_arith/src/lib.rs | 166 -- consensus/state_processing/src/all_caches.rs | 9 +- consensus/state_processing/src/epoch_cache.rs | 4 +- .../state_processing/src/upgrade/fulu.rs | 4 +- consensus/types/src/beacon_block.rs | 1 + consensus/types/src/beacon_response.rs | 24 +- consensus/types/src/beacon_state.rs | 104 +- consensus/types/src/chain_spec.rs | 76 +- consensus/types/src/data_column_sidecar.rs | 1 + consensus/types/src/epoch_cache.rs | 10 +- consensus/types/src/fork_name.rs | 42 +- consensus/types/src/preset.rs | 3 + lcli/Cargo.toml | 2 +- lcli/src/http_sync.rs | 2 +- lighthouse/Cargo.toml | 2 +- lighthouse/src/main.rs | 73 +- lighthouse/tests/beacon_node.rs | 124 +- scripts/print_release_diffs.py | 72 + .../tests/checkpoint-sync-config-devnet.yaml | 4 + scripts/tests/checkpoint-sync.sh | 2 +- testing/ef_tests/src/cases/fork.rs | 2 +- testing/simulator/src/checks.rs | 2 +- wordlist.txt | 1 + 118 files changed, 4753 insertions(+), 3938 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE.md create mode 100644 beacon_node/lighthouse_network/src/mallory_config.rs create mode 100644 beacon_node/lighthouse_network/src/service/mallory.rs delete mode 100644 beacon_node/network/src/subnet_service/attestation_subnets.rs delete mode 100644 beacon_node/network/src/subnet_service/sync_subnets.rs create mode 100644 common/task_executor/src/rayon_pool_provider.rs delete mode 100644 consensus/safe_arith/Cargo.toml delete mode 100644 consensus/safe_arith/src/iter.rs delete mode 100644 consensus/safe_arith/src/lib.rs create 
mode 100644 scripts/print_release_diffs.py diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index d73b9ff6f0..0000000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,21 +0,0 @@ -## Description - -Please provide a brief description of the issue. - -## Version - -Please provide your Lighthouse and Rust version. Are you building from -`stable` or `unstable`, which commit? - -## Present Behaviour - -Describe the present behaviour of the application, with regards to this -issue. - -## Expected Behaviour - -How _should_ the application behave? - -## Steps to resolve - -Please describe the steps required to resolve this issue, if known. diff --git a/.github/mergify.yml b/.github/mergify.yml index 4ab73bcf07..0b917b2546 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -105,6 +105,10 @@ queue_rules: {{ body | get_section("## Proposed Changes", "") }} + + {% for commit in commits | unique(attribute='email_author') %} + Co-Authored-By: {{ commit.author }} <{{ commit.email_author }}> + {% endfor %} queue_conditions: - "#approved-reviews-by >= 1" - "check-success=license/cla" diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 59a045c7d3..0201bf9ae3 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -225,6 +225,7 @@ jobs: TEST_FEATURES: portable CI_LOGGER_DIR: ${{ runner.temp }}/network_test_logs - name: Upload logs + if: always() uses: actions/upload-artifact@v4 with: name: network_test_logs @@ -328,6 +329,7 @@ jobs: - name: Run a basic beacon chain sim that starts from Deneb run: cargo run --release --bin simulator basic-sim --disable-stdout-logging --log-dir ${{ runner.temp }}/basic_simulator_logs - name: Upload logs + if: always() uses: actions/upload-artifact@v4 with: name: basic_simulator_logs @@ -349,6 +351,7 @@ jobs: - name: Run a beacon chain sim which tests VC fallback behaviour run: cargo run --release --bin simulator fallback-sim 
--disable-stdout-logging --log-dir ${{ runner.temp }}/fallback_simulator_logs - name: Upload logs + if: always() uses: actions/upload-artifact@v4 with: name: fallback_simulator_logs diff --git a/Cargo.lock b/Cargo.lock index 88b5b7b57d..6c9d12777f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -37,7 +37,7 @@ dependencies = [ "eth2_keystore", "eth2_wallet", "filesystem", - "rand 0.9.0", + "rand 0.9.2", "regex", "rpassword", "serde", @@ -48,20 +48,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -70,7 +61,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", - "generic-array 0.14.7", + "generic-array 0.14.9", ] [[package]] @@ -113,14 +104,14 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -148,11 +139,11 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-trie", - "auto_impl 1.2.1", + "auto_impl 1.3.0", "derive_more 2.0.1", "either", "once_cell", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -164,14 +155,14 @@ dependencies = [ 
"alloy-primitives", "alloy-rlp", "crc", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] name = "alloy-eip2930" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbe3e16484669964c26ac48390245d84c410b1a5f968976076c17184725ef235" +checksum = "7b82752a889170df67bbb36d42ca63c531eb16274f0d7299ae2a680facba17bd" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -179,13 +170,13 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804cefe429015b4244966c006d25bda5545fa9db5990e9c9079faf255052f50a" +checksum = "9d4769c6ffddca380b0070d71c8b7f30bed375543fe76bb2f74ec0acf4b7cd16" dependencies = [ "alloy-primitives", "alloy-rlp", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -199,36 +190,35 @@ dependencies = [ "alloy-eip7702", "alloy-primitives", "alloy-rlp", - "auto_impl 1.2.1", + "auto_impl 1.3.0", "derive_more 2.0.1", "either", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] name = "alloy-primitives" -version = "1.0.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70b98b99c1dcfbe74d7f0b31433ff215e7d1555e367d90e62db904f3c9d4ff53" +checksum = "355bf68a433e0fd7f7d33d5a9fc2583fde70bf5c530f63b80845f8da5505cf28" dependencies = [ "alloy-rlp", "arbitrary", "bytes", "cfg-if", "const-hex", - "derive_arbitrary", "derive_more 2.0.1", - "foldhash", - "getrandom 0.3.1", - "hashbrown 0.15.2", - "indexmap 2.8.0", + "foldhash 0.2.0", + "getrandom 0.3.4", + "hashbrown 0.16.0", + "indexmap 2.11.4", "itoa", "k256 0.13.4", "keccak-asm", "paste", "proptest", "proptest-derive", - "rand 0.9.0", + "rand 0.9.2", "ruint", "rustc-hash 2.1.1", "serde", @@ -238,9 +228,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -249,13 +239,13 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40e1ef334153322fd878d07e86af7a529bcb86b2439525920a88eba87bcf943" +checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -273,12 +263,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -296,9 +280,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -311,50 +295,50 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = 
"4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" dependencies = [ "anstyle", - "once_cell", - "windows-sys 0.59.0", + "once_cell_polyfill", + "windows-sys 0.60.2", ] [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "arbitrary" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" dependencies = [ "derive_arbitrary", ] @@ -412,6 +396,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + [[package]] name = 
"ark-ff-asm" version = "0.3.0" @@ -432,6 +436,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.106", +] + [[package]] name = "ark-ff-macros" version = "0.3.0" @@ -457,6 +471,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "ark-serialize" version = "0.3.0" @@ -478,6 +505,18 @@ dependencies = [ "num-bigint", ] +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "num-bigint", +] + [[package]] name = "ark-std" version = "0.3.0" @@ -498,6 +537,16 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "arraydeque" version = "0.5.1" @@ -528,7 +577,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror 2.0.12", + "thiserror 2.0.17", "time", ] @@ -540,7 +589,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "synstructure", ] @@ -552,7 +601,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -584,9 +633,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -596,32 +645,20 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" dependencies = [ - "async-lock", + "autocfg", "cfg-if", "concurrent-queue", "futures-io", "futures-lite", "parking", "polling", - "rustix 0.38.44", + "rustix 1.1.2", "slab", - "tracing", - "windows-sys 0.59.0", -] - -[[package]] -name = "async-lock" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" -dependencies = [ - "event-listener 5.4.0", - "event-listener-strategy", - "pin-project-lite", + "windows-sys 0.61.2", ] [[package]] @@ -643,18 +680,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "async-trait" -version = "0.1.87" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -694,7 +731,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" dependencies = [ "base64 0.22.1", - "http 1.3.0", + "http 1.3.1", "log", "url", ] @@ -713,20 +750,20 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" @@ -738,7 +775,7 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", "itoa", @@ -764,7 +801,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", "mime", @@ -775,21 +812,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "backtrace" -version = "0.3.74" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets 0.52.6", -] - [[package]] name = "base-x" version = "0.2.11" @@ -808,6 +830,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "base256emoji" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c" 
+dependencies = [ + "const-str", + "match-lookup", +] + [[package]] name = "base58" version = "0.1.0" @@ -850,9 +882,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb97d56060ee67d285efb8001fec9d2a4c710c32efd2e14b5cbb5ba71930fc2d" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "beacon_chain" @@ -889,9 +921,9 @@ dependencies = [ "once_cell", "oneshot_broadcast", "operation_pool", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "proto_array", - "rand 0.9.0", + "rand 0.9.2", "rayon", "safe_arith", "sensitive_url", @@ -918,7 +950,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.1.0" +version = "8.0.0-rc.1" dependencies = [ "account_utils", "beacon_chain", @@ -933,7 +965,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper 1.6.0", + "hyper 1.7.0", "lighthouse_network", "monitoring_api", "network_utils", @@ -979,7 +1011,7 @@ dependencies = [ "logging", "metrics", "num_cpus", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "serde", "slot_clock", "strum", @@ -1011,7 +1043,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "cexpr", "clang-sys", "itertools 0.12.1", @@ -1024,7 +1056,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.100", + "syn 2.0.106", "which", ] @@ -1051,9 +1083,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = 
"2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" [[package]] name = "bitvec" @@ -1117,7 +1149,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ "block-padding 0.2.1", - "generic-array 0.14.7", + "generic-array 0.14.9", ] [[package]] @@ -1126,7 +1158,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", + "generic-array 0.14.9", ] [[package]] @@ -1156,7 +1188,7 @@ dependencies = [ "ethereum_ssz", "fixed_bytes", "hex", - "rand 0.9.0", + "rand 0.9.2", "safe_arith", "serde", "tree_hash", @@ -1165,9 +1197,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" +checksum = "dcdb4c7013139a150f9fc55d123186dbfaba0d912817466282c73ac49e71fb45" dependencies = [ "cc", "glob", @@ -1193,7 +1225,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.1.0" +version = "8.0.0-rc.1" dependencies = [ "beacon_node", "bytes", @@ -1243,9 +1275,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byte-slice-cast" @@ -1296,11 +1328,10 @@ dependencies = [ [[package]] name = "c-kzg" -version = "2.1.0" +version = "2.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7e3c397401eb76228c89561cf22f85f41c95aa799ee9d860de3ea1cbc728fc" +checksum = "e00bf4b112b07b505472dbefd19e37e53307e2bfed5a79e0cc161d58ccd0e687" 
dependencies = [ - "arbitrary", "blst", "cc", "glob", @@ -1310,11 +1341,11 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.9" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -1334,10 +1365,10 @@ checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -1348,10 +1379,11 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.16" +version = "1.2.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -1368,9 +1400,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -1404,11 +1436,10 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", @@ -1449,7 +1480,7 @@ version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.7", + "generic-array 0.14.9", ] [[package]] @@ -1476,9 +1507,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.32" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83" +checksum = "f4512b90fa68d3a9932cea5184017c5d200f5921df706d45e853537dea51508f" dependencies = [ "clap_builder", "clap_derive", @@ -1486,9 +1517,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.32" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8" +checksum = "0025e98baa12e766c67ba13ff4695a887a1eba19569aad00a472546795bd6730" dependencies = [ "anstream", "anstyle", @@ -1499,21 +1530,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.32" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "clap_utils" @@ -1555,7 +1586,7 @@ dependencies = [ "monitoring_api", "network", "operation_pool", - "rand 0.9.0", + "rand 0.9.2", "sensitive_url", "serde", "serde_json", @@ -1593,12 +1624,12 @@ dependencies = [ "bs58 0.4.0", "coins-core", "digest 0.10.7", - "getrandom 0.2.15", + "getrandom 
0.2.16", "hmac 0.12.1", "k256 0.11.6", "lazy_static", "serde", - "sha2 0.10.8", + "sha2 0.10.9", "thiserror 1.0.69", ] @@ -1610,12 +1641,12 @@ checksum = "2a11892bcac83b4c6e95ab84b5b06c76d9d70ad73548dd07418269c5c7977171" dependencies = [ "bitvec 0.17.4", "coins-bip32", - "getrandom 0.2.15", + "getrandom 0.2.16", "hex", "hmac 0.12.1", "pbkdf2 0.11.0", "rand 0.8.5", - "sha2 0.10.8", + "sha2 0.10.9", "thiserror 1.0.69", ] @@ -1630,21 +1661,21 @@ dependencies = [ "bech32", "blake2", "digest 0.10.7", - "generic-array 0.14.7", + "generic-array 0.14.9", "hex", "ripemd", "serde", "serde_derive", - "sha2 0.10.8", + "sha2 0.10.9", "sha3 0.10.8", "thiserror 1.0.69", ] [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "colored" @@ -1721,15 +1752,14 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.14.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" +checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" dependencies = [ "cfg-if", "cpufeatures", - "hex", "proptest", - "serde", + "serde_core", ] [[package]] @@ -1739,10 +1769,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] -name = "const_format" -version = "0.2.34" +name = "const-str" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3" + +[[package]] +name = "const_format" +version = "0.2.35" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" dependencies = [ "const_format_proc_macros", ] @@ -1846,9 +1882,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" dependencies = [ "crc-catalog", ] @@ -1861,9 +1897,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -1946,9 +1982,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-bigint" @@ -1956,7 +1992,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ - "generic-array 0.14.7", + "generic-array 0.14.9", "rand_core 0.6.4", "subtle", "zeroize", @@ -1968,7 +2004,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "generic-array 0.14.7", + "generic-array 0.14.9", "rand_core 0.6.4", "subtle", "zeroize", @@ -1980,7 +2016,7 @@ 
version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.7", + "generic-array 0.14.9", "rand_core 0.6.4", "typenum", ] @@ -1991,7 +2027,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ - "generic-array 0.14.7", + "generic-array 0.14.9", "subtle", ] @@ -2015,12 +2051,13 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.5" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" +checksum = "881c5d0a13b2f1498e2306e82cbada78390e152d4b1378fb28a84f4dcd0dc4f3" dependencies = [ - "nix 0.29.0", - "windows-sys 0.59.0", + "dispatch", + "nix 0.30.1", + "windows-sys 0.61.2", ] [[package]] @@ -2047,7 +2084,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2062,12 +2099,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "darling_core 0.20.10", - "darling_macro 0.20.10", + "darling_core 0.20.11", + "darling_macro 0.20.11", ] [[package]] @@ -2086,16 +2123,16 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" dependencies = [ 
"fnv", "ident_case", "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2111,13 +2148,13 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "darling_core 0.20.10", + "darling_core 0.20.11", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2142,15 +2179,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" [[package]] name = "data-encoding-macro" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f9724adfcf41f45bf652b3995837669d73c4d49a1b5ac1ff82905ac7d9b5558" +checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2158,12 +2195,12 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f" +checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2191,9 +2228,9 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" [[package]] name = "delay_map" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"df941644b671f05f59433e481ba0d31ac10e3667de725236a4c0d587c496fba1" +checksum = "88e365f083a5cb5972d50ce8b1b2c9f125dc5ec0f50c0248cfb568ae59efcf0b" dependencies = [ "futures", "tokio", @@ -2226,9 +2263,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", "pem-rfc7468", @@ -2251,9 +2288,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "a41953f86f8a05768a6cda24def994fd2f424b04ec5c719cf89989779f199071" dependencies = [ "powerfmt", ] @@ -2271,26 +2308,26 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" +checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "derive_more" -version = "0.99.19" +version = "0.99.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da29a38df43d6f156149c9b43ded5e018ddff2a855cf2cfd62e8cd7d079c69f" +checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" dependencies = [ "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2319,7 +2356,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2330,7 +2367,7 @@ checksum = 
"bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "unicode-xid", ] @@ -2349,7 +2386,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.7", + "generic-array 0.14.9", ] [[package]] @@ -2395,9 +2432,8 @@ dependencies = [ [[package]] name = "discv5" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b4e7798d2ff74e29cee344dc490af947ae657d6ab5273dde35d58ce06a4d71" +version = "0.10.0" +source = "git+https://github.com/sigp/discv5?branch=mallory#13e0721b8a6410a73d5ab9705f75dbff2e021a86" dependencies = [ "aes 0.8.4", "aes-gcm", @@ -2416,16 +2452,22 @@ dependencies = [ "lru", "more-asserts", "multiaddr", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", "uint 0.10.0", "zeroize", ] +[[package]] +name = "dispatch" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b" + [[package]] name = "displaydoc" version = "0.2.5" @@ -2434,7 +2476,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2446,7 +2488,7 @@ dependencies = [ "eth2", "futures", "logging", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "slot_clock", "task_executor", "tokio", @@ -2485,7 +2527,7 @@ version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.9", + "der 0.7.10", "digest 0.10.7", "elliptic-curve 0.13.8", "rfc6979 0.4.0", @@ -2505,15 +2547,15 @@ dependencies = [ [[package]] name = 
"ed25519-dalek" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.8", + "sha2 0.10.9", "subtle", "zeroize", ] @@ -2527,7 +2569,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2565,9 +2607,9 @@ dependencies = [ [[package]] name = "eip4844" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa86cda6af15a9a5e4cf680850addaee8cd427be95be3ec9d022b9d7b98a66c0" +checksum = "82ab45fc63db6bbe5c3eb7c79303b2aff7ee529c991b2111c46879d1ea38407e" dependencies = [ "ekzg-bls12-381", "ekzg-maybe-rayon", @@ -2579,7 +2621,7 @@ dependencies = [ "itertools 0.14.0", "serde", "serde_json", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] @@ -2590,9 +2632,9 @@ checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "ekzg-bls12-381" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f0e00a7689af7f4f17e85ae07f5a92b568a47297a165f685b828edfd82e02b" +checksum = "05c599a59deba6188afd9f783507e4d89efc997f0fa340a758f0d0992b322416" dependencies = [ "blst", "blstrs", @@ -2604,9 +2646,9 @@ dependencies = [ [[package]] name = "ekzg-erasure-codes" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bfc7ab684a7bb0c5ee37fd6a73da7425858cdd28f4a285c70361f001d6d0efc" +checksum = "8474a41a30ddd2b651798b1aa9ce92011207c3667186fe9044184683250109e7" dependencies = [ "ekzg-bls12-381", "ekzg-polynomial", @@ -2614,27 +2656,27 @@ dependencies = [ [[package]] name = "ekzg-maybe-rayon" -version = "0.9.0" +version = "0.9.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0a4876a612b9317be470768e134b671b8e645e412a82eb12fdd9b1958fa6f9" +checksum = "9cf94d1385185c1f7caef4973be49702c7d9ffdeaf832d126dbb9ed6efe09d40" [[package]] name = "ekzg-multi-open" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f7964754aa0921aaa89b1589100e4cae9b31f87f137eeb0af5403fdfca68bfc" +checksum = "e6d37456a32cf79bdbddd6685a2adec73210e2d60332370bc0e9a502b6d93beb" dependencies = [ "ekzg-bls12-381", "ekzg-maybe-rayon", "ekzg-polynomial", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] name = "ekzg-polynomial" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed36d2ddf86661c9d18e9d5dfc47dce6c9b6e44db385e2da71952b10ba32df1" +checksum = "704751bac85af4754bb8a14457ef24d820738062d0b6f3763534d0980b1a1e81" dependencies = [ "ekzg-bls12-381", "ekzg-maybe-rayon", @@ -2642,9 +2684,9 @@ dependencies = [ [[package]] name = "ekzg-serialization" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c83402d591ac3534d1ae654feb8f56ee64cc2bacfe80bece7977c24ca5e72e2" +checksum = "3cb983d9f75b2804c00246def8d52c01cf05f70c22593b8d314fbcf0cf89042b" dependencies = [ "ekzg-bls12-381", "hex", @@ -2652,9 +2694,9 @@ dependencies = [ [[package]] name = "ekzg-single-open" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e1dbb13023ccebbb24593e4753c87f77b7fb78254a20aef1a028e979145092" +checksum = "799d5806d51e1453fa0f528d6acf4127e2a89e98312c826151ebc24ee3448ec3" dependencies = [ "ekzg-bls12-381", "ekzg-polynomial", @@ -2663,9 +2705,9 @@ dependencies = [ [[package]] name = "ekzg-trusted-setup" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ff1cb3e907b27fa51f35def95eeabe47e97765e2b6bac7e55967500937f94282" +checksum = "85314d56718dc2c6dd77c3b3630f1839defcb6f47d9c20195608a0f7976095ab" dependencies = [ "ekzg-bls12-381", "ekzg-serialization", @@ -2685,7 +2727,7 @@ dependencies = [ "der 0.6.1", "digest 0.10.7", "ff 0.12.1", - "generic-array 0.14.7", + "generic-array 0.14.9", "group 0.12.1", "pkcs8 0.9.0", "rand_core 0.6.4", @@ -2704,7 +2746,7 @@ dependencies = [ "crypto-bigint 0.5.5", "digest 0.10.7", "ff 0.13.1", - "generic-array 0.14.7", + "generic-array 0.14.9", "group 0.13.0", "pem-rfc7468", "pkcs8 0.10.2", @@ -2751,7 +2793,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2771,7 +2813,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2814,12 +2856,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.10" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2838,7 +2880,7 @@ dependencies = [ "scrypt 0.10.0", "serde", "serde_json", - "sha2 0.10.8", + "sha2 0.10.9", "sha3 0.10.8", "thiserror 1.0.69", "uuid 0.8.2", @@ -2862,7 +2904,7 @@ dependencies = [ "multiaddr", "pretty_reqwest_error", "proto_array", - "rand 0.9.0", + "rand 0.9.2", "reqwest 0.11.27", "reqwest-eventsource", "sensitive_url", @@ -2919,7 +2961,7 @@ dependencies = [ "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand 0.9.0", + "rand 0.9.2", "scrypt 0.7.0", "serde", "serde_json", @@ -2960,7 +3002,7 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand 0.9.0", + "rand 0.9.2", "serde", 
"serde_json", "serde_repr", @@ -3076,7 +3118,7 @@ checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", "ring", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] @@ -3094,9 +3136,9 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca8ba45b63c389c6e115b095ca16381534fdcc03cf58176a3f8554db2dbe19b" +checksum = "0dcddb2554d19cde19b099fadddde576929d7a4d0c1cd3512d1fd95cf174375c" dependencies = [ "alloy-primitives", "arbitrary", @@ -3110,14 +3152,14 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd55d08012b4e0dfcc92b8d6081234df65f2986ad34cc76eeed69c5e2ce7506" +checksum = "a657b6b3b7e153637dc6bdc6566ad9279d9ee11a15b12cfb24a2e04360637e9f" dependencies = [ - "darling 0.20.10", + "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -3149,7 +3191,7 @@ dependencies = [ "convert_case 0.6.0", "elliptic-curve 0.12.3", "ethabi 18.0.0", - "generic-array 0.14.7", + "generic-array 0.14.9", "hex", "k256 0.11.6", "open-fastrlp", @@ -3173,9 +3215,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9713f525348e5dde025d09b0a4217429f8074e8ff22c886263cc191e87d8216" dependencies = [ "ethers-core", - "getrandom 0.2.15", + "getrandom 0.2.16", "reqwest 0.11.27", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde-aux", "serde_json", @@ -3216,13 +3258,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" dependencies = [ "async-trait", - "auto_impl 1.2.1", + "auto_impl 1.3.0", "base64 0.13.1", "ethers-core", "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.15", + "getrandom 0.2.16", "hashers", "hex", "http 0.2.12", 
@@ -3258,7 +3300,7 @@ dependencies = [ "ethers-core", "hex", "rand 0.8.5", - "sha2 0.10.8", + "sha2 0.10.9", "thiserror 1.0.69", ] @@ -3270,9 +3312,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", @@ -3281,11 +3323,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "pin-project-lite", ] @@ -3351,9 +3393,9 @@ dependencies = [ "logging", "lru", "metrics", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "pretty_reqwest_error", - "rand 0.9.0", + "rand 0.9.2", "reqwest 0.11.27", "sensitive_url", "serde", @@ -3408,7 +3450,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" dependencies = [ "arrayvec", - "auto_impl 1.2.1", + "auto_impl 1.3.0", "bytes", ] @@ -3419,7 +3461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" dependencies = [ "arrayvec", - "auto_impl 1.2.1", + "auto_impl 1.3.0", "bytes", ] @@ -3484,6 +3526,12 @@ dependencies = [ "windows-acl", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" + [[package]] 
name = "fixed-hash" version = "0.7.0" @@ -3518,9 +3566,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" dependencies = [ "crc32fast", "libz-sys", @@ -3535,9 +3583,15 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" [[package]] name = "foreign-types" @@ -3574,9 +3628,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] @@ -3670,9 +3724,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" dependencies = [ "futures-core", "pin-project-lite", @@ -3696,7 +3750,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -3706,7 +3760,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.23", + "rustls 0.23.32", "rustls-pki-types", ] @@ -3755,19 +3809,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "generator" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" -dependencies = [ - "cfg-if", - "libc", - "log", - "rustversion", - "windows 0.58.0", -] - [[package]] name = "generic-array" version = "0.12.4" @@ -3779,9 +3820,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.7" +version = "0.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" dependencies = [ "typenum", "version_check", @@ -3805,27 +3846,29 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasip2", + "wasm-bindgen", ] [[package]] @@ -3838,12 
+3881,6 @@ dependencies = [ "polyval", ] -[[package]] -name = "gimli" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" - [[package]] name = "git-version" version = "0.3.9" @@ -3861,14 +3898,14 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "graffiti_file" @@ -3908,9 +3945,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" dependencies = [ "bytes", "fnv", @@ -3918,7 +3955,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.8.0", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -3927,17 +3964,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.8" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.3.0", - "indexmap 2.8.0", + "http 1.3.1", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -3946,12 +3983,13 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", + "zerocopy", ] [[package]] @@ -3987,13 +4025,22 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.1.5", +] + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +dependencies = [ + "foldhash 0.2.0", "serde", ] @@ -4030,7 +4077,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.5", ] [[package]] @@ -4100,24 +4147,15 @@ checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hermit-abi" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - -[[package]] -name = "hermit-abi" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -dependencies = [ - "serde", -] [[package]] name = "hex_fmt" @@ -4141,10 
+4179,10 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.0", + "rand 0.9.2", "ring", - "socket2", - "thiserror 2.0.12", + "socket2 0.5.10", + "thiserror 2.0.17", "tinyvec", "tokio", "tracing", @@ -4163,11 +4201,11 @@ dependencies = [ "ipconfig", "moka", "once_cell", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot 0.12.5", + "rand 0.9.2", "resolv-conf", "smallvec", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -4209,17 +4247,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - [[package]] name = "http" version = "0.2.12" @@ -4233,9 +4260,9 @@ dependencies = [ [[package]] name = "http" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a761d192fbf18bdef69f5ceedd0d1333afcbda0ee23840373b8317570d23c65" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -4260,7 +4287,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.3.0", + "http 1.3.1", ] [[package]] @@ -4271,7 +4298,7 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "pin-project-lite", ] @@ -4303,9 +4330,9 @@ dependencies = [ "network", "network_utils", "operation_pool", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "proto_array", - "rand 0.9.0", + "rand 0.9.2", "safe_arith", "sensitive_url", "serde", @@ -4362,9 +4389,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" 
+version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" @@ -4376,14 +4403,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.5.10", "tokio", "tower-service", "tracing", @@ -4392,20 +4419,22 @@ dependencies = [ [[package]] name = "hyper" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", - "h2 0.4.8", - "http 1.3.0", + "futures-core", + "h2 0.4.12", + "http 1.3.1", "http-body 1.0.1", "httparse", "httpdate", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", @@ -4431,7 +4460,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", "pin-project-lite", "tokio", @@ -4453,18 +4482,23 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", - "hyper 1.6.0", + "hyper 1.7.0", + "ipnet", + "libc", + "percent-encoding", 
"pin-project-lite", - "socket2", + "socket2 0.6.1", "tokio", "tower-service", "tracing", @@ -4472,16 +4506,17 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.61" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", - "windows-core 0.52.0", + "windows-core 0.62.2", ] [[package]] @@ -4495,21 +4530,22 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -4518,31 +4554,11 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", @@ -4550,67 +4566,54 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "potential_utf", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", - "icu_locid", - 
"icu_provider_macros", + "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -4619,9 +4622,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -4630,9 +4633,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -4668,7 +4671,7 @@ dependencies = [ "rtnetlink", "system-configuration 0.6.1", "tokio", - "windows 0.53.0", + "windows", ] [[package]] @@ -4681,12 +4684,12 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 1.3.0", + "http 1.3.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", "log", - "rand 0.9.0", + "rand 0.9.2", "tokio", "url", "xmltree", @@ -4707,7 +4710,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.7.4", + "parity-scale-codec 3.7.5", ] [[package]] @@ -4745,7 +4748,7 @@ checksum = 
"a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -4760,14 +4763,15 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.8.0" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.16.0", "serde", + "serde_core", ] [[package]] @@ -4781,8 +4785,8 @@ dependencies = [ "filesystem", "lockfile", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot 0.12.5", + "rand 0.9.2", "reqwest 0.11.27", "serde", "serde_json", @@ -4802,7 +4806,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ - "generic-array 0.14.7", + "generic-array 0.14.9", ] [[package]] @@ -4852,8 +4856,8 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", - "widestring 1.1.0", + "socket2 0.5.10", + "widestring 1.2.1", "windows-sys 0.48.0", "winreg", ] @@ -4864,13 +4868,23 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is-terminal" version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.5.0", + "hermit-abi 0.5.2", "libc", "windows-sys 0.59.0", ] @@ -4925,18 +4939,19 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" dependencies = [ "once_cell", "wasm-bindgen", @@ -4966,7 +4981,7 @@ dependencies = [ "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.8", + "sha2 0.10.9", "sha3 0.10.8", ] @@ -4980,7 +4995,7 @@ dependencies = [ "ecdsa 0.16.9", "elliptic-curve 0.13.8", "once_cell", - "sha2 0.10.8", + "sha2 0.10.9", "signature 2.2.0", ] @@ -5051,7 +5066,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.1.0" +version = "8.0.0-rc.1" dependencies = [ "account_utils", "beacon_chain", @@ -5111,25 +5126,25 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.175" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" 
+checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-link", ] [[package]] name = "libm" -version = "0.2.11" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libmdbx" @@ -5138,11 +5153,11 @@ source = "git+https://github.com/sigp/libmdbx-rs?rev=e6ff4b9377c1619bcf0bfdf52be dependencies = [ "bitflags 1.3.2", "byteorder", - "derive_more 0.99.19", + "derive_more 0.99.20", "indexmap 1.9.3", "libc", "mdbx-sys", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "thiserror 1.0.69", ] @@ -5156,7 +5171,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.15", + "getrandom 0.2.16", "libp2p-allow-block-list", "libp2p-connection-limits", "libp2p-core", @@ -5175,7 +5190,7 @@ dependencies = [ "multiaddr", "pin-project", "rw-stream-sink", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -5214,12 +5229,12 @@ dependencies = [ "multiaddr", "multihash", "multistream-select", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "pin-project", "quick-protobuf", "rand 0.8.5", "rw-stream-sink", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", "unsigned-varint 0.8.0", "web-time", @@ -5236,7 +5251,7 @@ dependencies = [ "hickory-resolver", "libp2p-core", "libp2p-identity", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "smallvec", "tracing", ] @@ -5244,19 +5259,21 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.50.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=5acdf89a65d64098f9346efa5769e57bcd19dea9#5acdf89a65d64098f9346efa5769e57bcd19dea9" +source = "git+https://github.com/sigp/rust-libp2p.git?branch=mallory#e96cbb2db1b85fff5765f120a731ff7f200874a8" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", 
"asynchronous-codec", "base64 0.22.1", "byteorder", "bytes", + "delay_map", "either", "fnv", "futures", "futures-timer", - "getrandom 0.2.15", + "getrandom 0.2.16", "hashlink 0.10.0", + "hex", "hex_fmt", "libp2p-core", "libp2p-identity", @@ -5266,7 +5283,7 @@ dependencies = [ "quick-protobuf-codec", "rand 0.8.5", "regex", - "sha2 0.10.8", + "sha2 0.10.9", "tracing", "web-time", ] @@ -5288,7 +5305,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", ] @@ -5308,8 +5325,8 @@ dependencies = [ "quick-protobuf", "rand 0.8.5", "sec1 0.7.3", - "sha2 0.10.8", - "thiserror 2.0.12", + "sha2 0.10.9", + "thiserror 2.0.17", "tracing", "zeroize", ] @@ -5328,7 +5345,7 @@ dependencies = [ "libp2p-swarm", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] @@ -5351,9 +5368,9 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.43.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aaa6fee3722e355443058472fc4705d78681bc2d8e447a0bdeb3fecf40cd197" +checksum = "95a4019ba30c4e42b776113e9778071691fe3f34bf23b6b3bf0dfcf29d801f3d" dependencies = [ "asynchronous-codec", "bytes", @@ -5361,7 +5378,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "rand 0.8.5", "smallvec", "tracing", @@ -5385,7 +5402,7 @@ dependencies = [ "rand 0.8.5", "snow", "static_assertions", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", "x25519-dalek", "zeroize", @@ -5422,9 +5439,9 @@ dependencies = [ "quinn", "rand 0.8.5", "ring", - "rustls 0.23.23", - "socket2", - "thiserror 2.0.12", + "rustls 0.23.32", + "socket2 0.5.10", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -5459,7 +5476,7 @@ checksum = "dd297cf53f0cb3dee4d2620bb319ae47ef27c702684309f682bdb7e55a18ae9c" dependencies = [ "heck 0.5.0", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -5473,7 +5490,7 @@ 
dependencies = [ "if-watch", "libc", "libp2p-core", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] @@ -5490,9 +5507,9 @@ dependencies = [ "libp2p-identity", "rcgen", "ring", - "rustls 0.23.23", - "rustls-webpki 0.103.4", - "thiserror 2.0.12", + "rustls 0.23.32", + "rustls-webpki 0.103.7", + "thiserror 2.0.17", "x509-parser", "yasna", ] @@ -5521,19 +5538,19 @@ dependencies = [ "either", "futures", "libp2p-core", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", "yamux 0.12.1", - "yamux 0.13.4", + "yamux 0.13.7", ] [[package]] name = "libredox" -version = "0.1.3" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "libc", ] @@ -5550,9 +5567,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "pkg-config", @@ -5561,7 +5578,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "7.1.0" +version = "8.0.0-rc.1" dependencies = [ "account_manager", "account_utils", @@ -5640,11 +5657,11 @@ dependencies = [ "lru_cache", "metrics", "network_utils", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "prometheus-client", "quickcheck", "quickcheck_macros", - "rand 0.9.0", + "rand 0.9.2", "regex", "serde", "sha2 0.9.9", @@ -5680,7 +5697,7 @@ dependencies = [ "futures", "initialized_validators", "logging", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "serde", "signing_method", "slashing_protection", @@ -5715,15 +5732,15 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" 
-version = "0.9.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9c683daf087dc577b7506e9695b3d556a9f3849903fa28186283afd6809e9" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "lmdb-rkv" @@ -5748,23 +5765,22 @@ dependencies = [ [[package]] name = "local-ip-address" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" +checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" dependencies = [ "libc", "neli", - "thiserror 1.0.69", + "thiserror 2.0.17", "windows-sys 0.59.0", ] [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] @@ -5778,9 +5794,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.26" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "logging" @@ -5812,28 +5828,21 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "loom" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" -dependencies = [ - 
"cfg-if", - "generator", - "scoped-tls", - "tracing", - "tracing-subscriber", -] - [[package]] name = "lru" version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.5", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "lru_cache" version = "0.1.0" @@ -5844,9 +5853,9 @@ dependencies = [ [[package]] name = "mach2" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" +checksum = "d640282b302c0bb0a2a8e0233ead9035e3bed871f0b7e81fe4a1ec829765db44" dependencies = [ "libc", ] @@ -5857,7 +5866,7 @@ version = "0.1.0" dependencies = [ "libc", "metrics", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "tikv-jemalloc-ctl", "tikv-jemallocator", ] @@ -5869,10 +5878,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] -name = "match_cfg" -version = "0.1.0" +name = "match-lookup" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] [[package]] name = "matchers" @@ -5914,9 +5928,9 @@ checksum = "33746aadcb41349ec291e7f2f0a3aa6834d1d7c58066fb4b01f68efc4c4b7631" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" 
+checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "memoffset" @@ -5982,7 +5996,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.13.0", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "rayon", "serde", "smallvec", @@ -6016,22 +6030,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi", + "windows-sys 0.59.0", ] [[package]] @@ -6063,7 +6078,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -6075,7 +6090,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -6088,13 +6103,13 @@ dependencies = [ "bytes", "colored", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", "log", - "rand 0.9.0", + "rand 0.9.2", "regex", "serde_json", "serde_urlencoded", @@ -6104,21 +6119,20 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.10" +version = "0.12.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +checksum = 
"8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077" dependencies = [ "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", - "loom", - "parking_lot 0.12.3", + "equivalent", + "parking_lot 0.12.5", "portable-atomic", "rustc_version 0.4.1", "smallvec", "tagptr", - "thiserror 1.0.69", - "uuid 1.15.1", + "uuid 1.18.1", ] [[package]] @@ -6167,11 +6181,12 @@ dependencies = [ [[package]] name = "multibase" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77" dependencies = [ "base-x", + "base256emoji", "data-encoding", "data-encoding-macro", ] @@ -6290,7 +6305,7 @@ dependencies = [ "log", "netlink-packet-core", "netlink-sys", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -6339,9 +6354,9 @@ dependencies = [ "matches", "metrics", "operation_pool", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "rand 0.8.5", - "rand 0.9.0", + "rand 0.9.2", "rand_chacha 0.3.1", "rand_chacha 0.9.0", "serde_json", @@ -6368,7 +6383,7 @@ dependencies = [ "lru_cache", "metrics", "multiaddr", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "serde", "tiny-keccak", ] @@ -6397,11 +6412,11 @@ dependencies = [ [[package]] name = "nix" -version = "0.29.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "cfg-if", "cfg_aliases", "libc", @@ -6451,11 +6466,11 @@ dependencies = [ [[package]] name = "nu-ansi-term" -version = "0.50.1" +version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" +checksum = 
"7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -6524,11 +6539,11 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.5.2", "libc", ] @@ -6542,15 +6557,6 @@ dependencies = [ "smallvec", ] -[[package]] -name = "object" -version = "0.36.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" -dependencies = [ - "memchr", -] - [[package]] name = "oid-registry" version = "0.8.1" @@ -6562,19 +6568,25 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.0" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde51589ab56b20a6f686b2c68f7a0bd6add753d697abf720d63f8db3ab7b1ad" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" dependencies = [ "critical-section", "portable-atomic", ] +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" + [[package]] name = "oneshot_broadcast" version = "0.1.0" dependencies = [ - "parking_lot 0.12.3", + "parking_lot 0.12.5", ] [[package]] @@ -6602,7 +6614,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" dependencies = [ "arrayvec", - "auto_impl 1.2.1", + "auto_impl 1.3.0", "bytes", "ethereum-types 0.14.1", "open-fastrlp-derive", @@ -6622,11 +6634,11 @@ dependencies = [ [[package]] name = "openssl" -version = 
"0.10.72" +version = "0.10.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +checksum = "24ad14dd45412269e1a30f52ad8f0664f0f4f4a89ee8fe28c3b3527021ebb654" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "cfg-if", "foreign-types", "libc", @@ -6643,7 +6655,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -6654,18 +6666,18 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" -version = "300.4.2+3.4.1" +version = "300.5.3+3.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168ce4e058f975fe43e89d9ccf78ca668601887ae736090aacc23ae353c298e2" +checksum = "dc6bad8cd0233b63971e232cc9c5e83039375b8586d2312f31fda85db8f888c2" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.107" +version = "0.9.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8288979acd84749c744a9014b4382d42b8f7b2592847b5afb2ed29e5d16ede07" +checksum = "0a9f0075ba3c21b09f8e8b2026584b1d18d49388648f2fbbf3c97ea8deced8e2" dependencies = [ "cc", "libc", @@ -6684,7 +6696,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", ] @@ -6696,9 +6708,9 @@ checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d" dependencies = [ "async-trait", "bytes", - "http 1.3.0", + "http 1.3.1", "opentelemetry", - "reqwest 0.12.15", + "reqwest 0.12.24", ] [[package]] @@ -6707,14 +6719,14 @@ version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" dependencies = [ - "http 1.3.0", + "http 1.3.1", "opentelemetry", "opentelemetry-http", "opentelemetry-proto", 
"opentelemetry_sdk", "prost", - "reqwest 0.12.15", - "thiserror 2.0.12", + "reqwest 0.12.24", + "thiserror 2.0.17", "tokio", "tonic 0.13.1", "tracing", @@ -6743,9 +6755,9 @@ dependencies = [ "futures-util", "opentelemetry", "percent-encoding", - "rand 0.9.0", + "rand 0.9.2", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -6760,8 +6772,8 @@ dependencies = [ "itertools 0.10.5", "maplit", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot 0.12.5", + "rand 0.9.2", "rayon", "serde", "state_processing", @@ -6788,7 +6800,7 @@ dependencies = [ "ecdsa 0.16.9", "elliptic-curve 0.13.8", "primeorder", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] @@ -6816,16 +6828,16 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "const_format", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.7.4", + "parity-scale-codec-derive 3.7.5", "rustversion", "serde", ] @@ -6844,14 +6856,14 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" dependencies = [ - "proc-macro-crate 3.3.0", + "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -6873,12 +6885,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = 
"93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", + "parking_lot_core 0.9.12", ] [[package]] @@ -6897,15 +6909,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.10", + "redox_syscall 0.5.18", "smallvec", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -6943,17 +6955,17 @@ dependencies = [ "digest 0.10.7", "hmac 0.12.1", "password-hash", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] name = "pem" -version = "3.0.5" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ "base64 0.22.1", - "serde", + "serde_core", ] [[package]] @@ -6967,18 +6979,17 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.7.15" +version = "2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" +checksum = "989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" dependencies = [ "memchr", - "thiserror 2.0.12", "ucd-trie", ] @@ -7009,7 +7020,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.100", + "syn 2.0.106", ] [[package]] @@ -7040,7 +7051,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.9", + "der 0.7.10", "spki 0.7.3", ] @@ -7086,17 +7097,16 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.4" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.4.0", + "hermit-abi 0.5.2", "pin-project-lite", - "rustix 0.38.44", - "tracing", - "windows-sys 0.59.0", + "rustix 1.1.2", + "windows-sys 0.61.2", ] [[package]] @@ -7124,9 +7134,18 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "potential_utf" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +dependencies = [ + "zerovec", +] [[package]] name = "powerfmt" @@ -7140,7 +7159,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.23", + "zerocopy", ] [[package]] @@ -7179,12 +7198,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.30" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1ccf34da56fc294e7d4ccf69a85992b7dfb826b7cf57bac6a70bba3494cc08a" +checksum = 
"479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -7235,11 +7254,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.22.24", + "toml_edit 0.23.7", ] [[package]] @@ -7268,9 +7287,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] @@ -7300,7 +7319,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "thiserror 1.0.69", ] @@ -7312,7 +7331,7 @@ checksum = "cf41c1a7c32ed72abe5082fb19505b969095c12da9f5732a4bc9878757fd087c" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "prometheus-client-derive-encode", ] @@ -7324,23 +7343,23 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "proptest" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +checksum = "2bb0be07becd10686a0bb407298fb425360a5c44a663774406340c59a22de4ce" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.9.0", + "bitflags 2.9.4", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_xorshift 0.3.0", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift 0.4.0", 
"regex-syntax", "rusty-fork", "tempfile", @@ -7349,13 +7368,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" +checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -7378,7 +7397,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -7411,7 +7430,7 @@ checksum = "5e617cc9058daa5e1fe5a0d23ed745773a5ee354111dad1ec0235b0cc16b6730" dependencies = [ "cfg-if", "darwin-libproc", - "derive_more 0.99.19", + "derive_more 0.99.20", "glob", "mach2", "nix 0.24.3", @@ -7462,49 +7481,52 @@ dependencies = [ [[package]] name = "quickcheck_macros" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" +checksum = "f71ee38b42f8459a88d3362be6f9b841ad2d5421844f61eb1c59c11bff3ac14a" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.106", ] [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", + "cfg_aliases", "futures-io", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", - "rustls 0.23.23", - "socket2", - "thiserror 2.0.12", + "rustls 0.23.32", + "socket2 0.6.1", + "thiserror 2.0.17", "tokio", "tracing", + "web-time", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", "ring", "rustc-hash 2.1.1", - "rustls 0.23.23", + "rustls 0.23.32", "rustls-pki-types", "slab", - "thiserror 2.0.12", + "thiserror 2.0.17", "tinyvec", "tracing", "web-time", @@ -7512,27 +7534,33 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.10" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.6.1", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.39" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "r2d2" version = "0.8.10" @@ -7540,7 +7568,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "scheduled-thread-pool", ] @@ -7585,14 +7613,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" 
+checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", "serde", - "zerocopy 0.8.23", ] [[package]] @@ -7621,7 +7648,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -7630,7 +7657,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.4", "serde", ] @@ -7654,9 +7681,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", @@ -7664,9 +7691,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -7687,9 +7714,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.4.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0a72cd7140de9fc3e318823b883abf819c20d478ec89ce880466dc2ef263c6" +checksum = "8eca1e9d98d5a7e9002d0013e18d5a9b000aee942eb134883a82f06ebffb6c01" dependencies = [ "libc", ] @@ -7705,11 +7732,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.10" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", ] [[package]] @@ -7718,16 +7745,16 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "libredox", "thiserror 1.0.69", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", @@ -7737,9 +7764,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", @@ -7748,9 +7775,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" @@ -7763,7 +7790,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.32", @@ -7800,25 +7827,22 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.15" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" 
+checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", - "ipnet", "js-sys", "log", - "mime", - "once_cell", "percent-encoding", "pin-project-lite", "serde", @@ -7827,12 +7851,12 @@ dependencies = [ "sync_wrapper 1.0.2", "tokio", "tower 0.5.2", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "windows-registry", ] [[package]] @@ -7853,13 +7877,9 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.0" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname", - "quick-error", -] +checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" [[package]] name = "rfc6979" @@ -7890,7 +7910,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "libc", "untrusted", "windows-sys 0.52.0", @@ -7965,28 +7985,29 @@ dependencies = [ [[package]] name = "ruint" -version = "1.14.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a46eb779843b2c4f21fac5773e25d6d5b7c8f0922876c91541790d2ca27eef" +checksum = "a68df0380e5c9d20ce49534f292a36a7514ae21350726efe1865bdb1fa91d278" dependencies = [ "alloy-rlp", "arbitrary", "ark-ff 0.3.0", "ark-ff 0.4.2", + "ark-ff 0.5.0", "bytes", "fastrlp 0.3.1", "fastrlp 0.4.0", "num-bigint", "num-integer", "num-traits", - "parity-scale-codec 3.7.4", + "parity-scale-codec 3.7.5", "primitive-types 0.12.2", "proptest", "rand 0.8.5", - "rand 0.9.0", + "rand 0.9.2", "rlp", "ruint-macro", - "serde", + "serde_core", "valuable", "zeroize", ] @@ -8013,9 +8034,9 @@ 
dependencies = [ [[package]] name = "rust_eth_kzg" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc46814bb8e72bff20fe117db43b7455112e6fafdae7466f8f24d451ad773c0" +checksum = "1522b7a740cd7f5bc52ea49863618511c8de138dcdf3f8a80b15b3f764942a5b" dependencies = [ "eip4844", "ekzg-bls12-381", @@ -8028,12 +8049,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - [[package]] name = "rustc-hash" version = "1.1.0" @@ -8067,7 +8082,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.26", + "semver 1.0.27", ] [[package]] @@ -8099,7 +8114,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.4.15", @@ -8108,15 +8123,15 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.2" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "errno", "libc", - "linux-raw-sys 0.9.2", - "windows-sys 0.59.0", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.2", ] [[package]] @@ -8147,29 +8162,29 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = 
"cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.8", + "rustls-webpki 0.103.7", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.3.0", + "security-framework 3.5.1", ] [[package]] @@ -8223,9 +8238,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.4" +version = "0.103.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" dependencies = [ "ring", "rustls-pki-types", @@ -8234,15 +8249,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "rusty-fork" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" dependencies = [ "fnv", "quick-error", @@ -8270,6 +8285,8 @@ checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "safe_arith" version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b147bb6111014916d3ef9d4c85173124a8e12193a67f6176d67244afd558d6c1" 
[[package]] name = "salsa20" @@ -8306,7 +8323,7 @@ checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "cfg-if", "derive_more 1.0.0", - "parity-scale-codec 3.7.4", + "parity-scale-codec 3.7.5", "scale-info-derive", ] @@ -8316,19 +8333,19 @@ version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ - "proc-macro-crate 3.3.0", + "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -8337,7 +8354,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ - "parking_lot 0.12.3", + "parking_lot 0.12.5", ] [[package]] @@ -8373,7 +8390,7 @@ dependencies = [ "hmac 0.12.1", "pbkdf2 0.11.0", "salsa20 0.10.2", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] @@ -8394,7 +8411,7 @@ checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct 0.1.1", "der 0.6.1", - "generic-array 0.14.7", + "generic-array 0.14.9", "pkcs8 0.9.0", "subtle", "zeroize", @@ -8407,8 +8424,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.9", - "generic-array 0.14.7", + "der 0.7.10", + "generic-array 0.14.9", "pkcs8 0.10.2", "subtle", "zeroize", @@ -8420,7 +8437,7 @@ version = "2.11.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -8429,11 +8446,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.3.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -8442,9 +8459,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" dependencies = [ "core-foundation-sys", "libc", @@ -8461,11 +8478,12 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" dependencies = [ "serde", + "serde_core", ] [[package]] @@ -8493,18 +8511,19 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] [[package]] name = "serde-aux" -version = "4.6.0" +version = "4.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5290c39c5f6992b9dddbda28541d965dba46468294e6018a408fa297e6c602de" +checksum = "207f67b28fe90fb596503a9bf0bf1ea5e831e21307658e177c5dfcdfc3ab8a0a" dependencies = [ "serde", "serde-value", @@ -8532,26 +8551,36 @@ dependencies = [ ] [[package]] -name = "serde_derive" -version = "1.0.219" +name = "serde_core" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] @@ -8562,7 +8591,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -8583,7 +8612,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.11.4", "itoa", "ryu", "serde", @@ -8628,9 +8657,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -8686,9 +8715,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] @@ -8720,7 +8749,7 @@ dependencies = [ "eth2_keystore", "ethereum_serde_utils", "lockfile", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "reqwest 0.11.27", "serde", "task_executor", @@ -8729,6 +8758,12 @@ dependencies = [ "validator_metrics", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "similar" version = "2.7.0" @@ -8743,7 +8778,7 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.12", + "thiserror 2.0.17", "time", ] @@ -8758,7 +8793,7 @@ dependencies = [ "kzg", "logging", "node_test_rig", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "rayon", "sensitive_url", "serde_json", @@ -8770,12 +8805,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "slasher" @@ -8794,8 +8826,8 @@ dependencies = [ "lru", "maplit", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot 0.12.5", + 
"rand 0.9.2", "rayon", "redb", "safe_arith", @@ -8849,15 +8881,15 @@ name = "slot_clock" version = "0.2.0" dependencies = [ "metrics", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "types", ] [[package]] name = "smallvec" -version = "1.14.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "arbitrary", ] @@ -8881,20 +8913,30 @@ dependencies = [ "rand_core 0.6.4", "ring", "rustc_version 0.4.1", - "sha2 0.10.8", + "sha2 0.10.9", "subtle", ] [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + [[package]] name = "spin" version = "0.9.8" @@ -8918,7 +8960,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.9", + "der 0.7.10", ] [[package]] @@ -8940,9 +8982,9 @@ dependencies = [ [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "state_processing" @@ -8960,7 +9002,7 @@ dependencies = [ "itertools 0.10.5", "merkle_proof", "metrics", - 
"rand 0.9.0", + "rand 0.9.2", "rayon", "safe_arith", "smallvec", @@ -9005,8 +9047,8 @@ dependencies = [ "logging", "lru", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot 0.12.5", + "rand 0.9.2", "redb", "safe_arith", "serde", @@ -9065,16 +9107,16 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "superstruct" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf0f31f730ad9e579364950e10d6172b4a9bd04b447edf5988b066a860cc340e" +checksum = "3b986e4a629907f20a2c2a639a75bc22a8b5d99b444e0d83c395f4cb309022bf" dependencies = [ - "darling 0.13.4", - "itertools 0.10.5", + "darling 0.20.11", + "itertools 0.13.0", "proc-macro2", "quote", "smallvec", - "syn 1.0.109", + "syn 2.0.106", ] [[package]] @@ -9100,9 +9142,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.100" +version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", @@ -9126,13 +9168,13 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -9167,7 +9209,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "core-foundation 0.9.4", "system-configuration-sys 0.6.0", ] @@ -9199,7 +9241,7 @@ dependencies = [ "lighthouse_network", "metrics", 
"network_utils", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "serde", "sysinfo", "types", @@ -9231,32 +9273,33 @@ dependencies = [ "async-channel 1.9.0", "futures", "metrics", + "num_cpus", + "rayon", "tokio", "tracing", ] [[package]] name = "tempfile" -version = "3.18.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c317e0a526ee6120d8dabad239c8dadca62b24b6f168914bbbc8e2fb1f0e567" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ - "cfg-if", "fastrand", - "getrandom 0.3.1", + "getrandom 0.3.4", "once_cell", - "rustix 1.0.2", - "windows-sys 0.59.0", + "rustix 1.1.2", + "windows-sys 0.61.2", ] [[package]] name = "terminal_size" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" +checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ - "rustix 1.0.2", - "windows-sys 0.59.0", + "rustix 1.1.2", + "windows-sys 0.60.2", ] [[package]] @@ -9284,11 +9327,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.17", ] [[package]] @@ -9299,28 +9342,27 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = 
"3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -9365,9 +9407,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.39" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad298b01a40a23aac4580b67e3dbedb7cc8402f3592d7f49469de2ea4aecdd8" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -9380,15 +9422,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c97a5b985b7c11d7bc27fa927dc4fe6af3a6dfb021d28deb60d3bf51e76ef" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.20" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8093bc3e81c3bc5f7879de09619d06c9a5a5e45ca44dfeeb7225bae38005c5c" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -9417,7 +9459,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand 0.8.5", "rustc-hash 1.1.0", - "sha2 0.10.8", + "sha2 0.10.9", "thiserror 1.0.69", "unicode-normalization", "wasm-bindgen", @@ -9435,9 +9477,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = 
"5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -9455,9 +9497,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] @@ -9470,32 +9512,31 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.0" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9975ea0f48b5aa3972bf2d888c238182458437cc2a19374b81b25cdf1023fb3a" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", "libc", "mio", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.1", "tokio-macros", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -9531,11 +9572,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.23", + "rustls 0.23.32", "tokio", ] @@ -9553,9 +9594,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = 
"0.7.13" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", @@ -9568,9 +9609,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" + +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] [[package]] name = "toml_edit" @@ -9578,20 +9628,30 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.8.0", - "toml_datetime", + "indexmap 2.11.4", + "toml_datetime 0.6.11", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.24" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ - "indexmap 2.8.0", - "toml_datetime", - "winnow 0.7.3", + "indexmap 2.11.4", + "toml_datetime 0.7.3", + "toml_parser", + "winnow 0.7.13", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow 0.7.13", ] [[package]] @@ -9605,17 +9665,17 @@ dependencies = [ "axum", "base64 0.22.1", "bytes", - "h2 
0.4.8", - "http 1.3.0", + "h2 0.4.12", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", "prost", - "socket2", + "socket2 0.5.10", "tokio", "tokio-stream", "tower 0.4.13", @@ -9633,10 +9693,10 @@ dependencies = [ "async-trait", "base64 0.22.1", "bytes", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-timeout", "hyper-util", "percent-encoding", @@ -9644,7 +9704,7 @@ dependencies = [ "prost", "rustls-native-certs", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls 0.26.4", "tokio-stream", "tower 0.5.2", "tower-layer", @@ -9680,7 +9740,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.8.0", + "indexmap 2.11.4", "pin-project-lite", "slab", "sync_wrapper 1.0.2", @@ -9691,6 +9751,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.9.4", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "iri-string", + "pin-project-lite", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" @@ -9729,20 +9807,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -9837,10 +9915,10 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bee2ea1551f90040ab0e34b6fb7f2fa3bad8acc925837ac654f2c78a13e3089" dependencies = [ - "darling 0.20.10", + "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -9855,9 +9933,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" +checksum = "dd69c5aa8f924c7519d6372789a74eac5b94fb0f8fcf0d4a97eb0bfc3e785f39" dependencies = [ "serde", "stable_deref_trait", @@ -9871,9 +9949,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "types" @@ -9903,9 +9981,9 @@ dependencies = [ "merkle_proof", "metastruct", "milhouse", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "paste", - "rand 0.9.0", + "rand 0.9.2", "rand_xorshift 0.4.0", "rayon", "regex", @@ -9978,9 +10056,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "unicode-normalization" @@ -10044,21 +10122,16 @@ checksum = 
"8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -10077,17 +10150,19 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "serde", ] [[package]] name = "uuid" -version = "1.15.1" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.4", + "js-sys", + "wasm-bindgen", ] [[package]] @@ -10105,12 +10180,12 @@ dependencies = [ "eth2", "fdlimit", "graffiti_file", - "hyper 1.6.0", + "hyper 1.7.0", "initialized_validators", "lighthouse_validator_store", "metrics", "monitoring_api", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "reqwest 0.11.27", "sensitive_url", "serde", @@ -10137,7 +10212,7 @@ dependencies = [ "filesystem", "hex", "lockfile", - "rand 0.9.0", + "rand 0.9.2", "tempfile", "tree_hash", "types", @@ -10166,8 +10241,8 @@ dependencies = [ "lighthouse_validator_store", "lighthouse_version", "logging", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot 0.12.5", + "rand 0.9.2", "sensitive_url", "serde", "serde_json", @@ -10201,7 
+10276,7 @@ dependencies = [ "logging", "malloc_utils", "metrics", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "serde", "slot_clock", "tracing", @@ -10258,7 +10333,7 @@ dependencies = [ "futures", "graffiti_file", "logging", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "safe_arith", "slot_clock", "task_executor", @@ -10391,50 +10466,51 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.13.3+wasi-0.2.2" +name = "wasip2" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.54" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" dependencies = [ "cfg-if", "js-sys", @@ -10445,9 +10521,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10455,22 +10531,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" dependencies = [ "unicode-ident", ] @@ -10505,9 +10581,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" dependencies = [ "js-sys", "wasm-bindgen", @@ -10537,7 +10613,7 @@ dependencies = [ "initialized_validators", "lighthouse_validator_store", "logging", - "parking_lot 0.12.3", + 
"parking_lot 0.12.5", "reqwest 0.11.27", "serde", "serde_json", @@ -10579,9 +10655,9 @@ checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "widestring" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471" [[package]] name = "winapi" @@ -10601,11 +10677,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -10624,16 +10700,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" -dependencies = [ - "windows-core 0.58.0", - "windows-targets 0.52.6", -] - [[package]] name = "windows-acl" version = "0.3.0" @@ -10646,15 +10712,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "windows-core" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-core" version = "0.53.0" @@ -10667,55 +10724,44 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.58.0" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +checksum = 
"b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-result 0.2.0", - "windows-strings 0.1.0", - "windows-targets 0.52.6", + "windows-link", + "windows-result 0.4.1", + "windows-strings", ] [[package]] name = "windows-implement" -version = "0.58.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "windows-interface" -version = "0.58.0" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "windows-link" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" - -[[package]] -name = "windows-registry" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" -dependencies = [ - "windows-result 0.3.1", - "windows-strings 0.3.1", - "windows-targets 0.53.2", -] +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" @@ -10728,37 +10774,18 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.2.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] 
-name = "windows-result" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06374efe858fab7e4f881500e6e86ec8bc28f9462c47e5a9941a0142ad86b189" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.1.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" -dependencies = [ - "windows-result 0.2.0", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-strings" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] @@ -10799,6 +10826,24 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -10847,18 +10892,19 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.2" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - 
"windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -10881,9 +10927,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -10905,9 +10951,9 @@ checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -10929,9 +10975,9 @@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -10941,9 +10987,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" +version = "0.53.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -10965,9 +11011,9 @@ checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -10989,9 +11035,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -11013,9 +11059,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -11037,9 +11083,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = 
"d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" @@ -11052,9 +11098,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.7.3" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] @@ -11070,13 +11116,10 @@ dependencies = [ ] [[package]] -name = "wit-bindgen-rt" -version = "0.33.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" -dependencies = [ - "bitflags 2.9.0", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "workspace_members" @@ -11086,23 +11129,17 @@ dependencies = [ "quote", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "ws_stream_wasm" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" +checksum = "6c173014acad22e83f16403ee360115b38846fe754e735c5d9d3803fe70c6abc" dependencies = [ "async_io_stream", "futures", @@ -11111,7 +11148,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper", - "thiserror 1.0.69", + "thiserror 2.0.17", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -11157,14 +11194,14 @@ 
dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror 2.0.12", + "thiserror 2.0.17", "time", ] [[package]] name = "xdelta3" version = "0.1.5" -source = "git+http://github.com/sigp/xdelta3-rs?rev=4db64086bb02e9febb584ba93b9d16bb2ae3825a#4db64086bb02e9febb584ba93b9d16bb2ae3825a" +source = "git+https://github.com/sigp/xdelta3-rs?rev=4db64086bb02e9febb584ba93b9d16bb2ae3825a#4db64086bb02e9febb584ba93b9d16bb2ae3825a" dependencies = [ "bindgen", "cc", @@ -11177,9 +11214,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.25" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" +checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" [[package]] name = "xmltree" @@ -11210,7 +11247,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "pin-project", "rand 0.8.5", "static_assertions", @@ -11218,16 +11255,16 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.4" +version = "0.13.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17610762a1207ee816c6fadc29220904753648aba0a9ed61c7b8336e80a559c4" +checksum = "6927cfe0edfae4b26a369df6bad49cd0ef088c0ec48f4045b2084bcaedc10246" dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot 0.12.5", "pin-project", - "rand 0.8.5", + "rand 0.9.2", "static_assertions", "web-time", ] @@ -11243,9 +11280,9 @@ dependencies = [ [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", @@ -11255,54 +11292,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = 
"0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" -dependencies = [ - "zerocopy-derive 0.8.23", + "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -11322,15 +11339,15 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" dependencies = [ "serde", "zeroize_derive", @@ -11344,14 +11361,25 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", ] [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ "yoke", "zerofrom", @@ -11360,13 +11388,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -11404,7 +11432,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ - "zstd-safe 7.2.3", + "zstd-safe 7.2.4", ] [[package]] @@ -11419,18 +11447,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = 
[ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 0b930b605d..e579fed2cc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,6 @@ members = [ "consensus/int_to_bytes", "consensus/merkle_proof", "consensus/proto_array", - "consensus/safe_arith", "consensus/state_processing", "consensus/swap_or_not_shuffle", "consensus/types", @@ -117,7 +116,7 @@ byteorder = "1" bytes = "1" # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. -c-kzg = { version = "2.1.0", default-features = false } +c-kzg = { version = "2.1", default-features = false } cargo_metadata = "0.19" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } clap_utils = { path = "common/clap_utils" } @@ -134,7 +133,6 @@ deposit_contract = { path = "common/deposit_contract" } derivative = "2" directory = { path = "common/directory" } dirs = "3" -discv5 = { version = "0.9", features = ["libp2p"] } doppelganger_service = { path = "validator_client/doppelganger_service" } either = "1.9" environment = { path = "lighthouse/environment" } @@ -144,6 +142,7 @@ eth2_key_derivation = { path = "crypto/eth2_key_derivation" } eth2_keystore = { path = "crypto/eth2_keystore" } eth2_network_config = { path = "common/eth2_network_config" } eth2_wallet = { path = "crypto/eth2_wallet" } +discv5 = { git= "https://github.com/sigp/discv5", features = ["libp2p"], branch = "mallory" } ethereum_hashing = "0.7.0" ethereum_serde_utils = "0.8.0" ethereum_ssz = "0.9.0" @@ -161,9 +160,8 @@ fork_choice = { path = "consensus/fork_choice" } fs2 = "0.4" futures = "0.3" genesis = { path = 
"beacon_node/genesis" } -# This is tracking the sigp-gossipsub branch on sigp/rust-libp2p commit: Aug 20 2025 -gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", rev = "5acdf89a65d64098f9346efa5769e57bcd19dea9", "features" = ["metrics"] } graffiti_file = { path = "validator_client/graffiti_file" } +gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", branch = "mallory", features = ["metrics"] } hashlink = "0.9.0" health_metrics = { path = "common/health_metrics" } hex = "0.4" @@ -224,8 +222,8 @@ reqwest = { version = "0.11", default-features = false, features = [ ring = "0.17" rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } -rust_eth_kzg = "0.9.0" -safe_arith = { path = "consensus/safe_arith" } +rust_eth_kzg = "0.9" +safe_arith = "0.1" sensitive_url = { path = "common/sensitive_url" } serde = { version = "1", features = ["derive"] } serde_json = "1" @@ -242,7 +240,7 @@ ssz_types = "0.11.0" state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" } strum = { version = "0.24", features = ["derive"] } -superstruct = "0.8" +superstruct = "0.10" swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } syn = "1" sysinfo = "0.26" @@ -279,7 +277,7 @@ validator_test_rig = { path = "testing/validator_test_rig" } warp = { version = "0.3.7", default-features = false, features = ["tls"] } warp_utils = { path = "common/warp_utils" } workspace_members = { path = "common/workspace_members" } -xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "4db64086bb02e9febb584ba93b9d16bb2ae3825a" } +xdelta3 = { git = "https://github.com/sigp/xdelta3-rs", rev = "4db64086bb02e9febb584ba93b9d16bb2ae3825a" } zeroize = { version = "1", features = ["zeroize_derive", "serde"] } zip = "0.6" zstd = "0.13" diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index dd7416af54..8e2c598fd4 100644 --- a/beacon_node/Cargo.toml +++ 
b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.1.0" +version = "8.0.0-rc.1" authors = [ "Paul Hauner ", "Age Manning BeaconBlockStreamer { if self.check_caches == CheckCaches::Yes { match self.beacon_chain.get_block_process_status(&root) { BlockProcessStatus::Unknown => None, - BlockProcessStatus::NotValidated(block) + BlockProcessStatus::NotValidated(block, _) | BlockProcessStatus::ExecutionValidated(block) => { metrics::inc_counter(&metrics::BEACON_REQRESP_PRE_IMPORT_CACHE_HITS); Some(block) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6e11b66610..85ccb96f69 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5,8 +5,9 @@ use crate::attestation_verification::{ }; use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; -use crate::beacon_proposer_cache::BeaconProposerCache; -use crate::beacon_proposer_cache::compute_proposer_duties_from_head; +use crate::beacon_proposer_cache::{ + BeaconProposerCache, EpochBlockProposers, ensure_state_can_determine_proposers_for_epoch, +}; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::POS_PANDA_BANNER; @@ -124,7 +125,7 @@ use store::{ BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; -use task_executor::{ShutdownReason, TaskExecutor}; +use task_executor::{RayonPoolType, ShutdownReason, TaskExecutor}; use tokio_stream::Stream; use tracing::{Span, debug, debug_span, error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; @@ -334,16 +335,12 @@ pub enum BlockProcessStatus { /// Block is not in any pre-import cache. Block may be in the data-base or in the fork-choice. 
Unknown, /// Block is currently processing but not yet validated. - NotValidated(Arc>), + NotValidated(Arc>, BlockImportSource), /// Block is fully valid, but not yet imported. It's cached in the da_checker while awaiting /// missing block components. ExecutionValidated(Arc>), } -pub struct BeaconChainMetrics { - pub reqresp_pre_import_cache_len: usize, -} - pub type LightClientProducerEvent = (Hash256, Slot, SyncAggregate); pub type BeaconForkChoice = ForkChoice< @@ -363,9 +360,6 @@ pub type BeaconStore = Arc< >, >; -/// Cache gossip verified blocks to serve over ReqResp before they are imported -type ReqRespPreImportCache = HashMap>>; - /// Represents the "Beacon Chain" component of Ethereum 2.0. Allows import of blocks and block /// operations and chooses a canonical head. pub struct BeaconChain { @@ -462,8 +456,6 @@ pub struct BeaconChain { pub(crate) attester_cache: Arc, /// A cache used when producing attestations whilst the head block is still being imported. pub early_attester_cache: EarlyAttesterCache, - /// Cache gossip verified blocks to serve over ReqResp before they are imported - pub reqresp_pre_import_cache: Arc>>, /// A cache used to keep track of various block timings. pub block_times_cache: Arc>, /// A cache used to track pre-finalization block roots for quick rejection. @@ -1289,18 +1281,8 @@ impl BeaconChain { /// chain. Used by sync to learn the status of a block and prevent repeated downloads / /// processing attempts. pub fn get_block_process_status(&self, block_root: &Hash256) -> BlockProcessStatus { - if let Some(block) = self - .data_availability_checker - .get_execution_valid_block(block_root) - { - return BlockProcessStatus::ExecutionValidated(block); - } - - if let Some(block) = self.reqresp_pre_import_cache.read().get(block_root) { - // A block is on the `reqresp_pre_import_cache` but NOT in the - // `data_availability_checker` only if it is actively processing. 
We can expect a future - // event with the result of processing - return BlockProcessStatus::NotValidated(block.clone()); + if let Some(cached_block) = self.data_availability_checker.get_cached_block(block_root) { + return cached_block; } BlockProcessStatus::Unknown @@ -3054,8 +3036,7 @@ impl BeaconChain { self.emit_sse_blob_sidecar_events(&block_root, std::iter::once(blob.as_blob())); - let r = self.check_gossip_blob_availability_and_import(blob).await; - self.remove_notified(&block_root, r) + self.check_gossip_blob_availability_and_import(blob).await } /// Cache the data columns in the processing cache, process it, then evict it from the cache if it was @@ -3092,15 +3073,13 @@ impl BeaconChain { data_columns.iter().map(|column| column.as_data_column()), ); - let r = self - .check_gossip_data_columns_availability_and_import( - slot, - block_root, - data_columns, - publish_fn, - ) - .await; - self.remove_notified(&block_root, r) + self.check_gossip_data_columns_availability_and_import( + slot, + block_root, + data_columns, + publish_fn, + ) + .await } /// Cache the blobs in the processing cache, process it, then evict it from the cache if it was @@ -3139,10 +3118,8 @@ impl BeaconChain { self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); - let r = self - .check_rpc_blob_availability_and_import(slot, block_root, blobs) - .await; - self.remove_notified(&block_root, r) + self.check_rpc_blob_availability_and_import(slot, block_root, blobs) + .await } /// Process blobs retrieved from the EL and returns the `AvailabilityProcessingStatus`. 
@@ -3174,10 +3151,8 @@ impl BeaconChain { } } - let r = self - .check_engine_blobs_availability_and_import(slot, block_root, engine_get_blobs_output) - .await; - self.remove_notified(&block_root, r) + self.check_engine_blobs_availability_and_import(slot, block_root, engine_get_blobs_output) + .await } fn emit_sse_blob_sidecar_events<'a, I>(self: &Arc, block_root: &Hash256, blobs_iter: I) @@ -3270,10 +3245,8 @@ impl BeaconChain { custody_columns.iter().map(|column| column.as_ref()), ); - let r = self - .check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) - .await; - self.remove_notified(&block_root, r) + self.check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) + .await } pub async fn reconstruct_data_columns( @@ -3299,15 +3272,15 @@ impl BeaconChain { let data_availability_checker = self.data_availability_checker.clone(); + let current_span = Span::current(); let result = self .task_executor - .spawn_blocking_handle( - move || data_availability_checker.reconstruct_data_columns(&block_root), - "reconstruct_data_columns", - ) - .ok_or(BeaconChainError::RuntimeShutdown)? 
+ .spawn_blocking_with_rayon_async(RayonPoolType::HighPriority, move || { + let _guard = current_span.enter(); + data_availability_checker.reconstruct_data_columns(&block_root) + }) .await - .map_err(BeaconChainError::TokioJoin)??; + .map_err(|_| BeaconChainError::RuntimeShutdown)??; match result { DataColumnReconstructionResult::Success((availability, data_columns_to_publish)) => { @@ -3316,10 +3289,8 @@ impl BeaconChain { return Ok(None); }; - let r = self - .process_availability(slot, availability, || Ok(())) - .await; - self.remove_notified(&block_root, r) + self.process_availability(slot, availability, || Ok(())) + .await .map(|availability_processing_status| { Some((availability_processing_status, data_columns_to_publish)) }) @@ -3336,46 +3307,6 @@ impl BeaconChain { } } - /// Remove any block components from the *processing cache* if we no longer require them. If the - /// block was imported full or erred, we no longer require them. - fn remove_notified( - &self, - block_root: &Hash256, - r: Result, - ) -> Result { - let has_missing_components = - matches!(r, Ok(AvailabilityProcessingStatus::MissingComponents(_, _))); - if !has_missing_components { - self.reqresp_pre_import_cache.write().remove(block_root); - } - r - } - - /// Wraps `process_block` in logic to cache the block's commitments in the processing cache - /// and evict if the block was imported or errored. - pub async fn process_block_with_early_caching>( - self: &Arc, - block_root: Hash256, - unverified_block: B, - block_source: BlockImportSource, - notify_execution_layer: NotifyExecutionLayer, - ) -> Result { - self.reqresp_pre_import_cache - .write() - .insert(block_root, unverified_block.block_cloned()); - - let r = self - .process_block( - block_root, - unverified_block, - notify_execution_layer, - block_source, - || Ok(()), - ) - .await; - self.remove_notified(&block_root, r) - } - /// Check for known and configured invalid block roots before processing. 
pub fn check_invalid_block_roots(&self, block_root: Hash256) -> Result<(), BlockError> { if self.config.invalid_block_roots.contains(&block_root) { @@ -3407,12 +3338,6 @@ impl BeaconChain { block_source: BlockImportSource, publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { - // Start the Prometheus timer. - let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); - - // Increment the Prometheus counter for block processing requests. - metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); - let block_slot = unverified_block.block().slot(); // Set observed time if not already set. Usually this should be set by gossip or RPC, @@ -3427,6 +3352,18 @@ impl BeaconChain { ); } + self.data_availability_checker.put_pre_execution_block( + block_root, + unverified_block.block_cloned(), + block_source, + )?; + + // Start the Prometheus timer. + let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); + + // Increment the Prometheus counter for block processing requests. + metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); + // A small closure to group the verification and import errors. let chain = self.clone(); let import_block = async move { @@ -3444,7 +3381,18 @@ impl BeaconChain { .set_time_consensus_verified(block_root, block_slot, timestamp) } - let executed_block = chain.into_executed_block(execution_pending).await?; + let executed_block = chain + .into_executed_block(execution_pending) + .await + .inspect_err(|_| { + // If the block fails execution for whatever reason (e.g. engine offline), + // and we keep it in the cache, then the node will NOT perform lookup and + // reprocess this block until the block is evicted from DA checker, causing the + // chain to get stuck temporarily if the block is canonical. Therefore we remove + // it from the cache if execution fails. 
+ self.data_availability_checker + .remove_block_on_execution_error(&block_root); + })?; // Record the *additional* time it took to wait for execution layer verification. if let Some(timestamp) = self.slot_clock.now_duration() { @@ -3570,9 +3518,7 @@ impl BeaconChain { block: AvailabilityPendingExecutedBlock, ) -> Result { let slot = block.block.slot(); - let availability = self - .data_availability_checker - .put_pending_executed_block(block)?; + let availability = self.data_availability_checker.put_executed_block(block)?; self.process_availability(slot, availability, || Ok(())) .await } @@ -3889,9 +3835,16 @@ impl BeaconChain { .map_err(BeaconChainError::from)?; } + // Take an upgradable read lock on fork choice so we can check if this block has already + // been imported. We don't want to repeat work importing a block that is already imported. + let fork_choice_reader = self.canonical_head.fork_choice_upgradable_read_lock(); + if fork_choice_reader.contains_block(&block_root) { + return Err(BlockError::DuplicateFullyImported(block_root)); + } + // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks by // avoiding taking other locks whilst holding this lock. - let mut fork_choice = self.canonical_head.fork_choice_write_lock(); + let mut fork_choice = parking_lot::RwLockUpgradableReadGuard::upgrade(fork_choice_reader); // Do not import a block that doesn't descend from the finalized root. let signed_block = @@ -4004,7 +3957,7 @@ impl BeaconChain { // See https://github.com/sigp/lighthouse/issues/2028 let (_, signed_block, block_data) = signed_block.deconstruct(); - match self.get_blobs_or_columns_store_op(block_root, block_data) { + match self.get_blobs_or_columns_store_op(block_root, signed_block.slot(), block_data) { Ok(Some(blobs_or_columns_store_op)) => { ops.push(blobs_or_columns_store_op); } @@ -4746,65 +4699,54 @@ impl BeaconChain { // Compute the proposer index. 
let head_epoch = cached_head.head_slot().epoch(T::EthSpec::slots_per_epoch()); - let shuffling_decision_root = if head_epoch == proposal_epoch { - cached_head - .snapshot - .beacon_state - .proposer_shuffling_decision_root(proposer_head)? - } else { - proposer_head - }; - let cached_proposer = self - .beacon_proposer_cache - .lock() - .get_slot::(shuffling_decision_root, proposal_slot); - let proposer_index = if let Some(proposer) = cached_proposer { - proposer.index as u64 - } else { - if head_epoch + self.config.sync_tolerance_epochs < proposal_epoch { - warn!( - msg = "this is a non-critical issue that can happen on unhealthy nodes or \ - networks.", - %proposal_epoch, - %head_epoch, - "Skipping proposer preparation" - ); + let shuffling_decision_root = cached_head + .snapshot + .beacon_state + .proposer_shuffling_decision_root_at_epoch(proposal_epoch, proposer_head, &self.spec)?; - // Don't skip the head forward more than two epochs. This avoids burdening an - // unhealthy node. - // - // Although this node might miss out on preparing for a proposal, they should still - // be able to propose. This will prioritise beacon chain health over efficient - // packing of execution blocks. - return Ok(None); + let Some(proposer_index) = self.with_proposer_cache( + shuffling_decision_root, + proposal_epoch, + |proposers| proposers.get_slot::(proposal_slot).map(|p| p.index as u64), + || { + if head_epoch + self.config.sync_tolerance_epochs < proposal_epoch { + warn!( + msg = "this is a non-critical issue that can happen on unhealthy nodes or \ + networks", + %proposal_epoch, + %head_epoch, + "Skipping proposer preparation" + ); + + // Don't skip the head forward too many epochs. This avoids burdening an + // unhealthy node. + // + // Although this node might miss out on preparing for a proposal, they should + // still be able to propose. This will prioritise beacon chain health over + // efficient packing of execution blocks. 
+ Err(Error::SkipProposerPreparation) + } else { + let head = self.canonical_head.cached_head(); + Ok(( + head.head_state_root(), + head.snapshot.beacon_state.clone(), + )) + } + }, + ).map_or_else(|e| { + match e { + Error::ProposerCacheIncorrectState { .. } => { + warn!("Head changed during proposer preparation"); + Ok(None) + } + Error::SkipProposerPreparation => { + // Warning logged for this above. + Ok(None) + } + e => Err(e) } - - let (proposers, decision_root, _, fork) = - compute_proposer_duties_from_head(proposal_epoch, self)?; - - let proposer_offset = (proposal_slot % T::EthSpec::slots_per_epoch()).as_usize(); - let proposer = *proposers - .get(proposer_offset) - .ok_or(BeaconChainError::NoProposerForSlot(proposal_slot))?; - - self.beacon_proposer_cache.lock().insert( - proposal_epoch, - decision_root, - proposers, - fork, - )?; - - // It's possible that the head changes whilst computing these duties. If so, abandon - // this routine since the change of head would have also spawned another instance of - // this routine. - // - // Exit now, after updating the cache. - if decision_root != shuffling_decision_root { - warn!("Head changed during proposer preparation"); - return Ok(None); - } - - proposer as u64 + }, |value| Ok(Some(value)))? else { + return Ok(None); }; // Get the `prev_randao` and parent block number. @@ -4964,14 +4906,19 @@ impl BeaconChain { // Only attempt a re-org if we have a proposer registered for the re-org slot. let proposing_at_re_org_slot = { - // The proposer shuffling has the same decision root as the next epoch attestation - // shuffling. We know our re-org block is not on the epoch boundary, so it has the - // same proposer shuffling as the head (but not necessarily the parent which may lie - // in the previous epoch). 
- let shuffling_decision_root = info - .head_node - .next_epoch_shuffling_id - .shuffling_decision_block; + // We know our re-org block is not on the epoch boundary, so it has the same proposer + // shuffling as the head (but not necessarily the parent which may lie in the previous + // epoch). + let shuffling_decision_root = if self + .spec + .fork_name_at_slot::(re_org_block_slot) + .fulu_enabled() + { + info.head_node.current_epoch_shuffling_id + } else { + info.head_node.next_epoch_shuffling_id + } + .shuffling_decision_block; let proposer_index = self .beacon_proposer_cache .lock() @@ -5286,16 +5233,20 @@ impl BeaconChain { None }; + let slashings_and_exits_span = debug_span!("get_slashings_and_exits").entered(); let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) = self.op_pool.get_slashings_and_exits(&state, &self.spec); + drop(slashings_and_exits_span); let eth1_data = state.eth1_data().clone(); let deposits = vec![]; + let bls_changes_span = debug_span!("get_bls_to_execution_changes").entered(); let bls_to_execution_changes = self .op_pool .get_bls_to_execution_changes(&state, &self.spec); + drop(bls_changes_span); // Iterate through the naive aggregation pool and ensure all the attestations from there // are included in the operation pool. @@ -6606,6 +6557,70 @@ impl BeaconChain { } } + pub fn with_proposer_cache + From>( + &self, + shuffling_decision_block: Hash256, + proposal_epoch: Epoch, + accessor: impl Fn(&EpochBlockProposers) -> Result, + state_provider: impl FnOnce() -> Result<(Hash256, BeaconState), E>, + ) -> Result { + let cache_entry = self + .beacon_proposer_cache + .lock() + .get_or_insert_key(proposal_epoch, shuffling_decision_block); + + // If the cache entry is not initialised, run the code to initialise it inside a OnceCell. + // This prevents duplication of work across multiple threads. 
+ // + // If it is already initialised, then `get_or_try_init` will return immediately without + // executing the initialisation code at all. + let epoch_block_proposers = cache_entry.get_or_try_init(|| { + debug!( + ?shuffling_decision_block, + %proposal_epoch, + "Proposer shuffling cache miss" + ); + + // Fetch the state on-demand if the required epoch was missing from the cache. + // If the caller wants to not compute the state they must return an error here and then + // catch it at the call site. + let (state_root, mut state) = state_provider()?; + + // Ensure the state can compute proposer duties for `epoch`. + ensure_state_can_determine_proposers_for_epoch( + &mut state, + state_root, + proposal_epoch, + &self.spec, + )?; + + // Sanity check the state. + let latest_block_root = state.get_latest_block_root(state_root); + let state_decision_block_root = state.proposer_shuffling_decision_root_at_epoch( + proposal_epoch, + latest_block_root, + &self.spec, + )?; + if state_decision_block_root != shuffling_decision_block { + return Err(Error::ProposerCacheIncorrectState { + state_decision_block_root, + requested_decision_block_root: shuffling_decision_block, + } + .into()); + } + + let proposers = state.get_beacon_proposer_indices(proposal_epoch, &self.spec)?; + Ok::<_, E>(EpochBlockProposers::new( + proposal_epoch, + state.fork(), + proposers, + )) + })?; + + // Run the accessor function on the computed epoch proposers. + accessor(epoch_block_proposers).map_err(Into::into) + } + /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head /// `head_block_root`. 
The `map_fn` will be supplied two values: /// @@ -7145,15 +7160,10 @@ impl BeaconChain { ) } - pub fn metrics(&self) -> BeaconChainMetrics { - BeaconChainMetrics { - reqresp_pre_import_cache_len: self.reqresp_pre_import_cache.read().len(), - } - } - pub(crate) fn get_blobs_or_columns_store_op( &self, block_root: Hash256, + block_slot: Slot, block_data: AvailableBlockData, ) -> Result>, String> { match block_data { @@ -7166,7 +7176,15 @@ impl BeaconChain { ); Ok(Some(StoreOp::PutBlobs(block_root, blobs))) } - AvailableBlockData::DataColumns(data_columns) => { + AvailableBlockData::DataColumns(mut data_columns) => { + let columns_to_custody = self.custody_columns_for_epoch(Some( + block_slot.epoch(T::EthSpec::slots_per_epoch()), + )); + // Supernodes need to persist all sampled custody columns + if columns_to_custody.len() != self.spec.number_of_custody_groups as usize { + data_columns + .retain(|data_column| columns_to_custody.contains(&data_column.index)); + } debug!( %block_root, count = data_columns.len(), diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 12970214c6..a64b4981cc 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -12,9 +12,9 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use fork_choice::ExecutionStatus; use lru::LruCache; use once_cell::sync::OnceCell; +use safe_arith::SafeArith; use smallvec::SmallVec; use state_processing::state_advance::partial_state_advance; -use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; use types::non_zero_usize::new_non_zero_usize; @@ -51,6 +51,34 @@ pub struct EpochBlockProposers { pub(crate) proposers: SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>, } +impl EpochBlockProposers { + pub fn new(epoch: Epoch, fork: Fork, proposers: Vec) -> Self { + Self { + epoch, + fork, + proposers: proposers.into(), + } + } + + pub fn 
get_slot(&self, slot: Slot) -> Result { + let epoch = slot.epoch(E::slots_per_epoch()); + if epoch == self.epoch { + self.proposers + .get(slot.as_usize() % E::SlotsPerEpoch::to_usize()) + .map(|&index| Proposer { + index, + fork: self.fork, + }) + .ok_or(BeaconChainError::ProposerCacheOutOfBounds { slot, epoch }) + } else { + Err(BeaconChainError::ProposerCacheWrongEpoch { + request_epoch: epoch, + cache_epoch: self.epoch, + }) + } + } +} + /// A cache to store the proposers for some epoch. /// /// See the module-level documentation for more information. @@ -76,23 +104,8 @@ impl BeaconProposerCache { ) -> Option { let epoch = slot.epoch(E::slots_per_epoch()); let key = (epoch, shuffling_decision_block); - let cache_opt = self.cache.get(&key).and_then(|cell| cell.get()); - if let Some(cache) = cache_opt { - // This `if` statement is likely unnecessary, but it feels like good practice. - if epoch == cache.epoch { - cache - .proposers - .get(slot.as_usize() % E::SlotsPerEpoch::to_usize()) - .map(|&index| Proposer { - index, - fork: cache.fork, - }) - } else { - None - } - } else { - None - } + let cache = self.cache.get(&key)?.get()?; + cache.get_slot::(slot).ok() } /// As per `Self::get_slot`, but returns all proposers in all slots for the given `epoch`. @@ -142,11 +155,7 @@ impl BeaconProposerCache { ) -> Result<(), BeaconStateError> { let key = (epoch, shuffling_decision_block); if !self.cache.contains(&key) { - let epoch_proposers = EpochBlockProposers { - epoch, - fork, - proposers: proposers.into(), - }; + let epoch_proposers = EpochBlockProposers::new(epoch, fork, proposers); self.cache .put(key, Arc::new(OnceCell::with_value(epoch_proposers))); } @@ -178,7 +187,12 @@ pub fn compute_proposer_duties_from_head( .ok_or(BeaconChainError::HeadMissingFromForkChoice(head_block_root))?; // Advance the state into the requested epoch. 
- ensure_state_is_in_epoch(&mut state, head_state_root, request_epoch, &chain.spec)?; + ensure_state_can_determine_proposers_for_epoch( + &mut state, + head_state_root, + request_epoch, + &chain.spec, + )?; let indices = state .get_beacon_proposer_indices(request_epoch, &chain.spec) @@ -186,13 +200,13 @@ pub fn compute_proposer_duties_from_head( let dependent_root = state // The only block which decides its own shuffling is the genesis block. - .proposer_shuffling_decision_root(chain.genesis_block_root) + .proposer_shuffling_decision_root(chain.genesis_block_root, &chain.spec) .map_err(BeaconChainError::from)?; Ok((indices, dependent_root, execution_status, state.fork())) } -/// If required, advance `state` to `target_epoch`. +/// If required, advance `state` to the epoch required to determine proposer indices in `target_epoch`. /// /// ## Details /// @@ -200,22 +214,39 @@ pub fn compute_proposer_duties_from_head( /// - No-op if `state.current_epoch() == target_epoch`. /// - It must be the case that `state.canonical_root() == state_root`, but this function will not /// check that. -pub fn ensure_state_is_in_epoch( +pub fn ensure_state_can_determine_proposers_for_epoch( state: &mut BeaconState, state_root: Hash256, target_epoch: Epoch, spec: &ChainSpec, ) -> Result<(), BeaconChainError> { - match state.current_epoch().cmp(&target_epoch) { - // Protects against an inconsistent slot clock. - Ordering::Greater => Err(BeaconStateError::SlotOutOfBounds.into()), - // The state needs to be advanced. - Ordering::Less => { + // The decision slot is the end of an epoch, so we add 1 to reach the first slot of the epoch + // at which the shuffling is determined. + let minimum_slot = spec + .proposer_shuffling_decision_slot::(target_epoch) + .safe_add(1)?; + let minimum_epoch = minimum_slot.epoch(E::slots_per_epoch()); + + // Before and after Fulu, the oldest epoch reachable from a state at epoch N is epoch N itself, + // i.e. we can never "look back". 
+ let maximum_epoch = target_epoch; + + if state.current_epoch() > maximum_epoch { + Err(BeaconStateError::SlotOutOfBounds.into()) + } else if state.current_epoch() >= minimum_epoch { + if target_epoch > state.current_epoch() { let target_slot = target_epoch.start_slot(E::slots_per_epoch()); + + // Advance the state into the same epoch as the block. Use the "partial" method since state + // roots are not important for proposer/attester shuffling. partial_state_advance(state, Some(state_root), target_slot, spec) - .map_err(BeaconChainError::from) + .map_err(BeaconChainError::from)?; } - // The state is suitable, nothing to do. - Ordering::Equal => Ok(()), + Ok(()) + } else { + // State's current epoch is less than the minimum epoch. + // Advance the state up to the minimum epoch. + partial_state_advance(state, Some(state_root), minimum_slot, spec) + .map_err(BeaconChainError::from) } } diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 53676c0b24..53f2eff0ca 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -5,8 +5,7 @@ use std::sync::Arc; use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crate::block_verification::{ - BlockSlashInfo, cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, - process_block_slash_info, + BlockSlashInfo, get_validator_pubkey_cache, process_block_slash_info, }; use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::observed_data_sidecars::{ObservationStrategy, Observe}; @@ -494,59 +493,31 @@ pub fn validate_blob_sidecar_for_gossip(proposer_shuffling_root, blob_slot); - - let (proposer_index, fork) = if let Some(proposer) = proposer_opt { - (proposer.index, proposer.fork) - } else { - debug!( - %block_root, - %blob_index, - "Proposer shuffling cache miss for blob verification" - ); - let (parent_state_root, mut parent_state) = chain - .store - 
.get_advanced_hot_state(block_parent_root, blob_slot, parent_block.state_root) - .map_err(|e| GossipBlobError::BeaconChainError(Box::new(e.into())))? - .ok_or_else(|| { - BeaconChainError::DBInconsistent(format!( - "Missing state for parent block {block_parent_root:?}", - )) - })?; - - let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( - &mut parent_state, - Some(parent_state_root), - blob_slot, - &chain.spec, - )?; - - let epoch = state.current_epoch(); - let proposers = state.get_beacon_proposer_indices(epoch, &chain.spec)?; - let proposer_index = *proposers - .get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) - .ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?; - - // Prime the proposer shuffling cache with the newly-learned value. - chain.beacon_proposer_cache.lock().insert( - blob_epoch, - proposer_shuffling_root, - proposers, - state.fork(), - )?; - (proposer_index, state.fork()) - }; + let proposer = chain.with_proposer_cache( + proposer_shuffling_root, + blob_epoch, + |proposers| proposers.get_slot::(blob_slot), + || { + debug!( + %block_root, + index = %blob_index, + "Proposer shuffling cache miss for blob verification" + ); + chain + .store + .get_advanced_hot_state(block_parent_root, blob_slot, parent_block.state_root) + .map_err(|e| GossipBlobError::BeaconChainError(Box::new(e.into())))? + .ok_or_else(|| { + GossipBlobError::BeaconChainError(Box::new(BeaconChainError::DBInconsistent( + format!("Missing state for parent block {block_parent_root:?}",), + ))) + }) + }, + )?; + let proposer_index = proposer.index; + let fork = proposer.fork; // Signature verify the signed block header. 
let signature_is_valid = { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 1d10fae0a4..d0ed8258e5 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -948,61 +948,35 @@ impl GossipVerifiedBlock { } let proposer_shuffling_decision_block = - if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == block_epoch { - parent_block - .next_epoch_shuffling_id - .shuffling_decision_block - } else { - parent_block.root - }; + parent_block.proposer_shuffling_root_for_child_block(block_epoch, &chain.spec); // We assign to a variable instead of using `if let Some` directly to ensure we drop the // write lock before trying to acquire it again in the `else` clause. - let proposer_opt = chain - .beacon_proposer_cache - .lock() - .get_slot::(proposer_shuffling_decision_block, block.slot()); - let (expected_proposer, fork, parent, block) = if let Some(proposer) = proposer_opt { - // The proposer index was cached and we can return it without needing to load the - // parent. - (proposer.index, proposer.fork, None, block) - } else { - // The proposer index was *not* cached and we must load the parent in order to determine - // the proposer index. - let (mut parent, block) = load_parent(block, chain)?; - - debug!( - parent_root = ?parent.beacon_block_root, - parent_slot = %parent.beacon_block.slot(), - ?block_root, - block_slot = %block.slot(), - "Proposer shuffling cache miss" - ); - - // The state produced is only valid for determining proposer/attester shuffling indices. 
- let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( - &mut parent.pre_state, - parent.beacon_state_root, - block.slot(), - &chain.spec, - )?; - - let epoch = state.current_epoch(); - let proposers = state.get_beacon_proposer_indices(epoch, &chain.spec)?; - let proposer_index = *proposers - .get(block.slot().as_usize() % T::EthSpec::slots_per_epoch() as usize) - .ok_or_else(|| BeaconChainError::NoProposerForSlot(block.slot()))?; - - // Prime the proposer shuffling cache with the newly-learned value. - chain.beacon_proposer_cache.lock().insert( - block_epoch, - proposer_shuffling_decision_block, - proposers, - state.fork(), - )?; - - (proposer_index, state.fork(), Some(parent), block) - }; + let block_slot = block.slot(); + let mut opt_parent = None; + let proposer = chain.with_proposer_cache::<_, BlockError>( + proposer_shuffling_decision_block, + block_epoch, + |proposers| proposers.get_slot::(block_slot), + || { + // The proposer index was *not* cached and we must load the parent in order to + // determine the proposer index. + let (mut parent, _) = load_parent(block.clone(), chain)?; + let parent_state_root = if let Some(state_root) = parent.beacon_state_root { + state_root + } else { + // This is potentially a little inefficient, although we are likely to need + // the state's hash eventually (if the block is valid), and we are also likely + // to already have the hash cached (if fetched from the state cache). + parent.pre_state.canonical_root()? 
+ }; + let parent_state = parent.pre_state.clone(); + opt_parent = Some(parent); + Ok((parent_state_root, parent_state)) + }, + )?; + let expected_proposer = proposer.index; + let fork = proposer.fork; let signature_is_valid = { let pubkey_cache = get_validator_pubkey_cache(chain)?; @@ -1077,7 +1051,7 @@ impl GossipVerifiedBlock { Ok(Self { block, block_root, - parent, + parent: opt_parent, consensus_context, }) } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 5e7aa7d4f8..5564c7916f 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -899,6 +899,7 @@ where let genesis_time = head_snapshot.beacon_state.genesis_time(); let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; + let complete_blob_backfill = self.chain_config.complete_blob_backfill; // Calculate the weak subjectivity point in which to backfill blocks to. 
let genesis_backfill_slot = if self.chain_config.genesis_backfill { @@ -997,7 +998,6 @@ where validator_pubkey_cache: RwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), - reqresp_pre_import_cache: <_>::default(), light_client_server_cache: LightClientServerCache::new(), light_client_server_tx: self.light_client_server_tx, shutdown_sender: self @@ -1013,6 +1013,7 @@ where genesis_backfill_slot, data_availability_checker: Arc::new( DataAvailabilityChecker::new( + complete_blob_backfill, slot_clock, self.kzg.clone(), store, diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 56d1975972..7dd4c88c51 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -47,8 +47,9 @@ use fork_choice::{ ResetPayloadStatuses, }; use itertools::process_results; +use lighthouse_tracing::SPAN_RECOMPUTE_HEAD; use logging::crit; -use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; use slot_clock::SlotClock; use state_processing::AllCaches; use std::sync::Arc; @@ -57,6 +58,7 @@ use store::{ Error as StoreError, KeyValueStore, KeyValueStoreOp, StoreConfig, iter::StateRootsIterator, }; use task_executor::{JoinHandle, ShutdownReason}; +use tracing::info_span; use tracing::{debug, error, info, instrument, warn}; use types::*; @@ -79,6 +81,10 @@ impl CanonicalHeadRwLock { self.0.read() } + fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, T> { + self.0.upgradable_read() + } + fn write(&self) -> RwLockWriteGuard<'_, T> { self.0.write() } @@ -379,6 +385,7 @@ impl CanonicalHead { /// /// This function is **not safe** to be public. See the module-level documentation for more /// information about protecting from deadlocks. 
+ #[instrument(skip_all)] fn cached_head_write_lock(&self) -> RwLockWriteGuard<'_, CachedHead> { self.cached_head.write() } @@ -389,7 +396,16 @@ impl CanonicalHead { self.fork_choice.read() } + /// Access an upgradable read-lock for fork choice. + pub fn fork_choice_upgradable_read_lock( + &self, + ) -> RwLockUpgradableReadGuard<'_, BeaconForkChoice> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_UPGRADABLE_READ_LOCK_AQUIRE_TIMES); + self.fork_choice.upgradable_read() + } + /// Access a write-lock for fork choice. + #[instrument(skip_all)] pub fn fork_choice_write_lock(&self) -> RwLockWriteGuard<'_, BeaconForkChoice> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_WRITE_LOCK_AQUIRE_TIMES); self.fork_choice.write() @@ -497,13 +513,21 @@ impl BeaconChain { /// situation can be rectified. We avoid returning an error here so that calling functions /// can't abort block import because an error is returned here. pub async fn recompute_head_at_slot(self: &Arc, current_slot: Slot) { + let span = info_span!( + SPAN_RECOMPUTE_HEAD, + slot = %current_slot + ); + metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); let chain = self.clone(); match self .spawn_blocking_handle( - move || chain.recompute_head_at_slot_internal(current_slot), + move || { + let _guard = span.enter(); + chain.recompute_head_at_slot_internal(current_slot) + }, "recompute_head_internal", ) .await @@ -761,6 +785,7 @@ impl BeaconChain { } /// Perform updates to caches and other components after the canonical head has been changed. 
+ #[instrument(skip_all)] fn after_new_head( self: &Arc, old_cached_head: &CachedHead, @@ -804,7 +829,7 @@ impl BeaconChain { let head_slot = new_snapshot.beacon_state.slot(); let dependent_root = new_snapshot .beacon_state - .proposer_shuffling_decision_root(self.genesis_block_root); + .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Next); let prev_dependent_root = new_snapshot .beacon_state .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); @@ -899,6 +924,7 @@ impl BeaconChain { /// /// This function will take a write-lock on `canonical_head.fork_choice`, therefore it would be /// unwise to hold any lock on fork choice while calling this function. + #[instrument(skip_all)] fn after_finalization( self: &Arc, new_cached_head: &CachedHead, @@ -911,13 +937,6 @@ impl BeaconChain { .execution_status .is_optimistic_or_invalid(); - self.op_pool.prune_all( - &new_snapshot.beacon_block, - &new_snapshot.beacon_state, - self.epoch()?, - &self.spec, - ); - self.observed_block_producers.write().prune( new_view .finalized_checkpoint @@ -956,9 +975,9 @@ impl BeaconChain { })); } - // The store migration task requires the *state at the slot of the finalized epoch*, - // rather than the state of the latest finalized block. These two values will only - // differ when the first slot of the finalized epoch is a skip slot. + // The store migration task and op pool pruning require the *state at the first slot of the + // finalized epoch*, rather than the state of the latest finalized block. These two values + // will only differ when the first slot of the finalized epoch is a skip slot. // // Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot` // to ensure we use the same state that we just set as the head. @@ -980,6 +999,23 @@ impl BeaconChain { )? 
.ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; + let update_cache = true; + let new_finalized_state = self + .store + .get_hot_state(&new_finalized_state_root, update_cache)? + .ok_or(Error::MissingBeaconState(new_finalized_state_root))?; + + self.op_pool.prune_all( + &new_snapshot.beacon_block, + &new_snapshot.beacon_state, + &new_finalized_state, + self.epoch()?, + &self.spec, + ); + + // We just pass the state root to the finalization thread. It should be able to reload the + // state from the state_cache near instantly anyway. We could experiment with sending the + // state over a channel in future, but it's probably no quicker. self.store_migrator.process_finalization( new_finalized_state_root.into(), new_view.finalized_checkpoint, @@ -1034,6 +1070,7 @@ impl BeaconChain { /// /// This function is called whilst holding a write-lock on the `canonical_head`. To ensure dead-lock /// safety, **do not take any other locks inside this function**. +#[instrument(skip_all)] fn check_finalized_payload_validity( chain: &BeaconChain, finalized_proto_block: &ProtoBlock, @@ -1117,6 +1154,7 @@ fn perform_debug_logging( } } +#[instrument(skip_all)] fn spawn_execution_layer_updates( chain: Arc>, forkchoice_update_params: ForkchoiceUpdateParameters, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index d6be96afe9..a7defa9fa2 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -86,6 +86,8 @@ pub struct ChainConfig { /// If using a weak-subjectivity sync, whether we should download blocks all the way back to /// genesis. pub genesis_backfill: bool, + /// EXPERIMENTAL: backfill blobs and data columns beyond the data availability window. + pub complete_blob_backfill: bool, /// Whether to send payload attributes every slot, regardless of connected proposers. /// /// This is useful for block builders and testing. 
@@ -144,6 +146,7 @@ impl Default for ChainConfig { optimistic_finalized_sync: true, shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, genesis_backfill: false, + complete_blob_backfill: false, always_prepare_payload: false, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, enable_light_client_server: true, diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 9225ed6b47..43b7d8f7ea 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -7,7 +7,9 @@ use crate::block_verification_types::{ use crate::data_availability_checker::overflow_lru_cache::{ DataAvailabilityCheckerInner, ReconstructColumnsDecision, }; -use crate::{BeaconChain, BeaconChainTypes, BeaconStore, CustodyContext, metrics}; +use crate::{ + BeaconChain, BeaconChainTypes, BeaconStore, BlockProcessStatus, CustodyContext, metrics, +}; use kzg::Kzg; use slot_clock::SlotClock; use std::fmt; @@ -19,14 +21,15 @@ use task_executor::TaskExecutor; use tracing::{debug, error, instrument}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ - BlobSidecarList, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch, EthSpec, Hash256, - SignedBeaconBlock, Slot, + BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch, + EthSpec, Hash256, SignedBeaconBlock, Slot, }; mod error; mod overflow_lru_cache; mod state_lru_cache; +use crate::data_availability_checker::error::Error; use crate::data_column_verification::{ CustodyDataColumn, GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, verify_kzg_for_data_column_list, @@ -78,6 +81,7 @@ pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); /// proposer. 
Having a capacity > 1 is an optimization to prevent sync lookup from having re-fetch /// data during moments of unstable network conditions. pub struct DataAvailabilityChecker { + complete_blob_backfill: bool, availability_cache: Arc>, slot_clock: T::SlotClock, kzg: Arc, @@ -116,6 +120,7 @@ impl Debug for Availability { impl DataAvailabilityChecker { pub fn new( + complete_blob_backfill: bool, slot_clock: T::SlotClock, kzg: Arc, store: BeaconStore, @@ -129,6 +134,7 @@ impl DataAvailabilityChecker { spec.clone(), )?; Ok(Self { + complete_blob_backfill, availability_cache: Arc::new(inner), slot_clock, kzg, @@ -141,14 +147,12 @@ impl DataAvailabilityChecker { &self.custody_context } - /// Checks if the block root is currenlty in the availability cache awaiting import because + /// Checks if the block root is currently in the availability cache awaiting import because /// of missing components. - pub fn get_execution_valid_block( - &self, - block_root: &Hash256, - ) -> Option>> { - self.availability_cache - .get_execution_valid_block(block_root) + /// + /// Returns the cache block wrapped in a `BlockProcessStatus` enum if it exists. + pub fn get_cached_block(&self, block_root: &Hash256) -> Option> { + self.availability_cache.get_cached_block(block_root) } /// Return the set of cached blob indexes for `block_root`. Returns None if there is no block @@ -337,12 +341,30 @@ impl DataAvailabilityChecker { /// Check if we have all the blobs for a block. Returns `Availability` which has information /// about whether all components have been received or more are required. - pub fn put_pending_executed_block( + pub fn put_executed_block( &self, executed_block: AvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { + self.availability_cache.put_executed_block(executed_block) + } + + /// Inserts a pre-execution block into the cache. + /// This does NOT override an existing executed block. 
+ pub fn put_pre_execution_block( + &self, + block_root: Hash256, + block: Arc>, + source: BlockImportSource, + ) -> Result<(), Error> { self.availability_cache - .put_pending_executed_block(executed_block) + .put_pre_execution_block(block_root, block, source) + } + + /// Removes a pre-execution block from the cache. + /// This does NOT remove an existing executed block. + pub fn remove_block_on_execution_error(&self, block_root: &Hash256) { + self.availability_cache + .remove_pre_execution_block(block_root); } /// Verifies kzg commitments for an RpcBlock, returns a `MaybeAvailableBlock` that may @@ -518,9 +540,15 @@ impl DataAvailabilityChecker { /// The epoch at which we require a data availability check in block processing. /// `None` if the `Deneb` fork is disabled. pub fn data_availability_boundary(&self) -> Option { - let current_epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch()); - self.spec - .min_epoch_data_availability_boundary(current_epoch) + let fork_epoch = self.spec.deneb_fork_epoch?; + + if self.complete_blob_backfill { + Some(fork_epoch) + } else { + let current_epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch()); + self.spec + .min_epoch_data_availability_boundary(current_epoch) + } } /// Returns true if the given epoch lies within the da boundary and false otherwise. 
@@ -547,6 +575,7 @@ impl DataAvailabilityChecker { } } + #[instrument(skip_all, level = "debug")] pub fn reconstruct_data_columns( &self, block_root: &Hash256, @@ -1075,7 +1104,15 @@ mod test { let kzg = get_kzg(&spec); let store = Arc::new(HotColdDB::open_ephemeral(<_>::default(), spec.clone()).unwrap()); let custody_context = Arc::new(CustodyContext::new(false)); - DataAvailabilityChecker::new(slot_clock, kzg, store, custody_context, spec) - .expect("should initialise data availability checker") + let complete_blob_backfill = false; + DataAvailabilityChecker::new( + complete_blob_backfill, + slot_clock, + kzg, + store, + custody_context, + spec, + ) + .expect("should initialise data availability checker") } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 9de63f6126..42f6dbd856 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -1,6 +1,5 @@ use super::AvailableBlockData; use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache}; -use crate::BeaconChainTypes; use crate::CustodyContext; use crate::beacon_chain::BeaconStore; use crate::blob_verification::KzgVerifiedBlob; @@ -9,6 +8,7 @@ use crate::block_verification_types::{ }; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; use crate::data_column_verification::KzgVerifiedCustodyDataColumn; +use crate::{BeaconChainTypes, BlockProcessStatus}; use lighthouse_tracing::SPAN_PENDING_COMPONENTS; use lru::LruCache; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -16,12 +16,47 @@ use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; use tracing::{Span, debug, debug_span}; +use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobIdentifier; use 
types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, Epoch, EthSpec, - Hash256, RuntimeFixedVector, RuntimeVariableList, SignedBeaconBlock, + BlobSidecar, BlockImportSource, ChainSpec, ColumnIndex, DataColumnSidecar, + DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeFixedVector, RuntimeVariableList, + SignedBeaconBlock, }; +#[derive(Clone)] +pub enum CachedBlock { + PreExecution(Arc>, BlockImportSource), + Executed(Box>), +} + +impl CachedBlock { + pub fn get_commitments(&self) -> KzgCommitments { + let block = self.as_block(); + block + .message() + .body() + .blob_kzg_commitments() + .cloned() + .unwrap_or_default() + } + + fn as_block(&self) -> &SignedBeaconBlock { + match self { + CachedBlock::PreExecution(b, _) => b, + CachedBlock::Executed(b) => b.as_block(), + } + } + + pub fn num_blobs_expected(&self) -> usize { + self.as_block() + .message() + .body() + .blob_kzg_commitments() + .map_or(0, |commitments| commitments.len()) + } +} + /// This represents the components of a partially available block /// /// The blobs are all gossip and kzg verified. @@ -39,22 +74,25 @@ pub struct PendingComponents { pub block_root: Hash256, pub verified_blobs: RuntimeFixedVector>>, pub verified_data_columns: Vec>, - pub executed_block: Option>, + pub block: Option>, pub reconstruction_started: bool, span: Span, } impl PendingComponents { - /// Returns an immutable reference to the cached block. - pub fn get_cached_block(&self) -> &Option> { - &self.executed_block - } - /// Returns an immutable reference to the fixed vector of cached blobs. pub fn get_cached_blobs(&self) -> &RuntimeFixedVector>> { &self.verified_blobs } + #[cfg(test)] + fn get_diet_block(&self) -> Option<&DietAvailabilityPendingExecutedBlock> { + self.block.as_ref().and_then(|block| match block { + CachedBlock::Executed(block) => Some(block.as_ref()), + _ => None, + }) + } + /// Returns an immutable reference to the cached data column. 
pub fn get_cached_data_column( &self, @@ -66,11 +104,6 @@ impl PendingComponents { .map(|d| d.clone_arc()) } - /// Returns a mutable reference to the cached block. - pub fn get_cached_block_mut(&mut self) -> &mut Option> { - &mut self.executed_block - } - /// Returns a mutable reference to the fixed vector of cached blobs. pub fn get_cached_blobs_mut(&mut self) -> &mut RuntimeFixedVector>> { &mut self.verified_blobs @@ -96,9 +129,21 @@ impl PendingComponents { .collect() } - /// Inserts a block into the cache. - pub fn insert_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { - *self.get_cached_block_mut() = Some(block) + /// Inserts an executed block into the cache. + pub fn insert_executed_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { + self.block = Some(CachedBlock::Executed(Box::new(block))) + } + + /// Inserts a pre-execution block into the cache. + /// This does NOT override an existing executed block. + pub fn insert_pre_execution_block( + &mut self, + block: Arc>, + source: BlockImportSource, + ) { + if self.block.is_none() { + self.block = Some(CachedBlock::PreExecution(block, source)) + } } /// Inserts a blob at a specific index in the cache. @@ -128,7 +173,7 @@ impl PendingComponents { /// 1. The blob entry at the index is empty and no block exists, or /// 2. The block exists and its commitment matches the blob's commitment. pub fn merge_single_blob(&mut self, index: usize, blob: KzgVerifiedBlob) { - if let Some(cached_block) = self.get_cached_block() { + if let Some(cached_block) = &self.block { let block_commitment_opt = cached_block.get_commitments().get(index).copied(); if let Some(block_commitment) = block_commitment_opt && block_commitment == *blob.get_commitment() @@ -158,7 +203,7 @@ impl PendingComponents { /// /// Blobs that don't match the new block's commitments are evicted. 
pub fn merge_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { - self.insert_block(block); + self.insert_executed_block(block); let reinsert = self.get_cached_blobs_mut().take(); self.merge_blobs(reinsert); } @@ -180,7 +225,7 @@ impl PendingComponents { &Span, ) -> Result, AvailabilityCheckError>, { - let Some(block) = &self.executed_block else { + let Some(CachedBlock::Executed(block)) = &self.block else { // Block not available yet return Ok(None); }; @@ -267,7 +312,7 @@ impl PendingComponents { block, import_data, payload_verification_outcome, - } = recover(block.clone(), &self.span)?; + } = recover(*block.clone(), &self.span)?; let available_block = AvailableBlock { block_root: self.block_root, @@ -295,7 +340,7 @@ impl PendingComponents { block_root, verified_blobs: RuntimeFixedVector::new(vec![None; max_len]), verified_data_columns: vec![], - executed_block: None, + block: None, reconstruction_started: false, span, } @@ -307,9 +352,9 @@ impl PendingComponents { /// - The first data column /// Otherwise, returns None pub fn epoch(&self) -> Option { - // Get epoch from cached executed block - if let Some(executed_block) = &self.executed_block { - return Some(executed_block.as_block().epoch()); + // Get epoch from cached block + if let Some(block) = &self.block { + return Some(block.as_block().epoch()); } // Or, get epoch from first available blob @@ -326,7 +371,7 @@ impl PendingComponents { } pub fn status_str(&self, num_expected_columns_opt: Option) -> String { - let block_count = if self.executed_block.is_some() { 1 } else { 0 }; + let block_count = if self.block.is_some() { 1 } else { 0 }; if let Some(num_expected_columns) = num_expected_columns_opt { format!( "block {} data_columns {}/{}", @@ -335,7 +380,7 @@ impl PendingComponents { num_expected_columns ) } else { - let num_expected_blobs = if let Some(block) = self.get_cached_block() { + let num_expected_blobs = if let Some(block) = &self.block { &block.num_blobs_expected().to_string() } else 
{ "?" @@ -387,18 +432,19 @@ impl DataAvailabilityCheckerInner { } /// Returns true if the block root is known, without altering the LRU ordering - pub fn get_execution_valid_block( - &self, - block_root: &Hash256, - ) -> Option>> { + pub fn get_cached_block(&self, block_root: &Hash256) -> Option> { self.critical .read() .peek(block_root) .and_then(|pending_components| { - pending_components - .executed_block - .as_ref() - .map(|block| block.block_cloned()) + pending_components.block.as_ref().map(|block| match block { + CachedBlock::PreExecution(b, source) => { + BlockProcessStatus::NotValidated(b.clone(), *source) + } + CachedBlock::Executed(b) => { + BlockProcessStatus::ExecutionValidated(b.block_cloned()) + } + }) }) } @@ -592,9 +638,9 @@ impl DataAvailabilityCheckerInner { /// Check whether data column reconstruction should be attempted. /// - /// Potentially trigger reconstruction if: - /// - Our custody requirement is all columns (supernode), and we haven't got all columns - /// - We have >= 50% of columns, but not all columns + /// Potentially trigger reconstruction if all the following satisfy: + /// - Our custody requirement is more than 50% of total columns, + /// - We haven't received all required columns /// - Reconstruction hasn't been started for the block /// /// If reconstruction is required, returns `PendingComponents` which contains the @@ -609,15 +655,25 @@ impl DataAvailabilityCheckerInner { return ReconstructColumnsDecision::No("block already imported"); }; - // If we're sampling all columns, it means we must be custodying all columns. 
+ let Some(epoch) = pending_components + .verified_data_columns + .first() + .map(|c| c.as_data_column().epoch()) + else { + return ReconstructColumnsDecision::No("not enough columns"); + }; + let total_column_count = T::EthSpec::number_of_columns(); + let sampling_column_count = self + .custody_context + .num_of_data_columns_to_sample(epoch, &self.spec); let received_column_count = pending_components.verified_data_columns.len(); if pending_components.reconstruction_started { return ReconstructColumnsDecision::No("already started"); } - if received_column_count >= total_column_count { - return ReconstructColumnsDecision::No("all columns received"); + if received_column_count >= sampling_column_count { + return ReconstructColumnsDecision::No("all sampling columns received"); } if received_column_count < total_column_count / 2 { return ReconstructColumnsDecision::No("not enough columns"); @@ -637,9 +693,47 @@ impl DataAvailabilityCheckerInner { } } + /// Inserts a pre executed block into the cache. + /// - This does NOT trigger the availability check as the block still needs to be executed. + /// - This does NOT override an existing cached block to avoid overwriting an executed block. + pub fn put_pre_execution_block( + &self, + block_root: Hash256, + block: Arc>, + source: BlockImportSource, + ) -> Result<(), AvailabilityCheckError> { + let epoch = block.epoch(); + let pending_components = + self.update_or_insert_pending_components(block_root, epoch, |pending_components| { + pending_components.insert_pre_execution_block(block, source); + Ok(()) + })?; + + let num_expected_columns_opt = self.get_num_expected_columns(epoch); + + pending_components.span.in_scope(|| { + debug!( + component = "pre execution block", + status = pending_components.status_str(num_expected_columns_opt), + "Component added to data availability checker" + ); + }); + + Ok(()) + } + + /// Removes a pre-execution block from the cache. + /// This does NOT remove an existing executed block. 
+ pub fn remove_pre_execution_block(&self, block_root: &Hash256) { + // The read lock is immediately dropped so we can safely remove the block from the cache. + if let Some(BlockProcessStatus::NotValidated(_, _)) = self.get_cached_block(block_root) { + self.critical.write().pop(block_root); + } + } + /// Check if we have all the blobs for a block. If we do, return the Availability variant that /// triggers import of the block. - pub fn put_pending_executed_block( + pub fn put_executed_block( &self, executed_block: AvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { @@ -657,14 +751,7 @@ impl DataAvailabilityCheckerInner { Ok(()) })?; - let num_expected_columns_opt = if self.spec.is_peer_das_enabled_for_epoch(epoch) { - let num_of_column_samples = self - .custody_context - .num_of_data_columns_to_sample(epoch, &self.spec); - Some(num_of_column_samples) - } else { - None - }; + let num_expected_columns_opt = self.get_num_expected_columns(epoch); pending_components.span.in_scope(|| { debug!( @@ -681,6 +768,17 @@ impl DataAvailabilityCheckerInner { ) } + fn get_num_expected_columns(&self, epoch: Epoch) -> Option { + if self.spec.is_peer_das_enabled_for_epoch(epoch) { + let num_of_column_samples = self + .custody_context + .num_of_data_columns_to_sample(epoch, &self.spec); + Some(num_of_column_samples) + } else { + None + } + } + /// maintain the cache pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { // clean up any lingering states in the state cache @@ -954,7 +1052,7 @@ mod test { ); assert!(cache.critical.read().is_empty(), "cache should be empty"); let availability = cache - .put_pending_executed_block(pending_block) + .put_executed_block(pending_block) .expect("should put block"); if blobs_expected == 0 { assert!( @@ -1021,7 +1119,7 @@ mod test { ); } let availability = cache - .put_pending_executed_block(pending_block) + .put_executed_block(pending_block) .expect("should put block"); assert!( 
matches!(availability, Availability::Available(_)), @@ -1083,7 +1181,7 @@ mod test { // put the block in the cache let availability = cache - .put_pending_executed_block(pending_block) + .put_executed_block(pending_block) .expect("should put block"); // grab the diet block from the cache for later testing @@ -1091,12 +1189,7 @@ mod test { .critical .read() .peek(&block_root) - .map(|pending_components| { - pending_components - .executed_block - .clone() - .expect("should exist") - }) + .and_then(|pending_components| pending_components.get_diet_block().cloned()) .expect("should exist"); pushed_diet_blocks.push_back(diet_block); @@ -1257,7 +1350,7 @@ mod pending_components_tests { } pub fn assert_cache_consistent(cache: PendingComponents, max_len: usize) { - if let Some(cached_block) = cache.get_cached_block() { + if let Some(cached_block) = &cache.block { let cached_block_commitments = cached_block.get_commitments(); for index in 0..max_len { let block_commitment = cached_block_commitments.get(index).copied(); @@ -1363,4 +1456,38 @@ mod pending_components_tests { assert_cache_consistent(cache, max_len); } + + #[test] + fn should_not_insert_pre_execution_block_if_executed_block_exists() { + let (pre_execution_block, blobs, random_blobs, max_len) = pre_setup(); + let (executed_block, _blobs, _random_blobs) = + setup_pending_components(pre_execution_block.clone(), blobs, random_blobs); + + let block_root = pre_execution_block.canonical_root(); + let mut pending_component = >::empty(block_root, max_len); + + let pre_execution_block = Arc::new(pre_execution_block); + pending_component + .insert_pre_execution_block(pre_execution_block.clone(), BlockImportSource::Gossip); + assert!( + matches!( + pending_component.block, + Some(CachedBlock::PreExecution(_, _)) + ), + "pre execution block inserted" + ); + + pending_component.insert_executed_block(executed_block); + assert!( + matches!(pending_component.block, Some(CachedBlock::Executed(_))), + "executed block inserted" + ); 
+ + pending_component + .insert_pre_execution_block(pre_execution_block, BlockImportSource::Gossip); + assert!( + matches!(pending_component.block, Some(CachedBlock::Executed(_))), + "executed block should remain" + ); + } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 57c236efcf..24f9237e3c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -10,7 +10,6 @@ use state_processing::BlockReplayer; use std::sync::Arc; use store::OnDiskConsensusContext; use tracing::{Span, debug_span, instrument}; -use types::beacon_block_body::KzgCommitments; use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; /// This mirrors everything in the `AvailabilityPendingExecutedBlock`, except @@ -43,15 +42,6 @@ impl DietAvailabilityPendingExecutedBlock { .map_or(0, |commitments| commitments.len()) } - pub fn get_commitments(&self) -> KzgCommitments { - self.as_block() - .message() - .body() - .blob_kzg_commitments() - .cloned() - .unwrap_or_default() - } - /// Returns the epoch corresponding to `self.slot()`. 
pub fn epoch(&self) -> Epoch { self.block.slot().epoch(E::slots_per_epoch()) diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 608e003a22..01e79c49aa 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -1,7 +1,5 @@ -use crate::beacon_proposer_cache::EpochBlockProposers; use crate::block_verification::{ - BlockSlashInfo, cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, - process_block_slash_info, + BlockSlashInfo, get_validator_pubkey_cache, process_block_slash_info, }; use crate::kzg_utils::{reconstruct_data_columns, validate_data_columns}; use crate::observed_data_sidecars::{ObservationStrategy, Observe}; @@ -163,6 +161,15 @@ pub enum GossipDataColumnError { /// /// The column sidecar is invalid and the peer is faulty InconsistentProofsLength { cells_len: usize, proofs_len: usize }, + /// The number of KZG commitments exceeds the maximum number of blobs allowed for the fork. The + /// sidecar is invalid. + /// + /// ## Peer scoring + /// The column sidecar is invalid and the peer is faulty + MaxBlobsPerBlockExceeded { + max_blobs_per_block: usize, + commitments_len: usize, + }, } impl From for GossipDataColumnError { @@ -222,7 +229,7 @@ impl GossipVerifiedDataColumn column_sidecar: Arc>, chain: &BeaconChain, ) -> Result { - verify_data_column_sidecar(&column_sidecar)?; + verify_data_column_sidecar(&column_sidecar, &chain.spec)?; // Check if the data column is already in the DA checker cache. This happens when data columns // are made available through the `engine_getBlobs` method. 
If it exists in the cache, we know @@ -477,7 +484,7 @@ pub fn validate_data_column_sidecar_for_gossip, ) -> Result, GossipDataColumnError> { let column_slot = data_column.slot(); - verify_data_column_sidecar(&data_column)?; + verify_data_column_sidecar(&data_column, &chain.spec)?; verify_index_matches_subnet(&data_column, subnet, &chain.spec)?; verify_sidecar_not_from_future_slot(chain, column_slot)?; verify_slot_greater_than_latest_finalized_slot(chain, column_slot)?; @@ -531,6 +538,7 @@ pub fn validate_data_column_sidecar_for_gossip( data_column: &DataColumnSidecar, + spec: &ChainSpec, ) -> Result<(), GossipDataColumnError> { if data_column.index >= E::number_of_columns() as u64 { return Err(GossipDataColumnError::InvalidColumnIndex(data_column.index)); @@ -542,6 +550,14 @@ fn verify_data_column_sidecar( let cells_len = data_column.column.len(); let commitments_len = data_column.kzg_commitments.len(); let proofs_len = data_column.kzg_proofs.len(); + let max_blobs_per_block = spec.max_blobs_per_block(data_column.epoch()) as usize; + + if commitments_len > max_blobs_per_block { + return Err(GossipDataColumnError::MaxBlobsPerBlockExceeded { + max_blobs_per_block, + commitments_len, + }); + } if cells_len != commitments_len { return Err(GossipDataColumnError::InconsistentCommitmentsLength { @@ -610,22 +626,21 @@ fn verify_parent_block_and_finalized_descendant( chain: &BeaconChain, ) -> Result { let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let block_parent_root = data_column.block_parent_root(); + + // Do not process a column that does not descend from the finalized root. + if !fork_choice.is_finalized_checkpoint_or_descendant(block_parent_root) { + return Err(GossipDataColumnError::NotFinalizedDescendant { block_parent_root }); + } // We have already verified that the column is past finalization, so we can // just check fork choice for the block's parent. 
- let block_parent_root = data_column.block_parent_root(); let Some(parent_block) = fork_choice.get_block(&block_parent_root) else { return Err(GossipDataColumnError::ParentUnknown { parent_root: block_parent_root, }); }; - // Do not process a column that does not descend from the finalized root. - // We just loaded the parent_block, so we can be sure that it exists in fork choice. - if !fork_choice.is_finalized_checkpoint_or_descendant(block_parent_root) { - return Err(GossipDataColumnError::NotFinalizedDescendant { block_parent_root }); - } - Ok(parent_block) } @@ -641,65 +656,34 @@ fn verify_proposer_and_signature( let block_root = data_column.block_root(); let block_parent_root = data_column.block_parent_root(); - let proposer_shuffling_root = if parent_block.slot.epoch(slots_per_epoch) == column_epoch { - parent_block - .next_epoch_shuffling_id - .shuffling_decision_block - } else { - parent_block.root - }; + let proposer_shuffling_root = + parent_block.proposer_shuffling_root_for_child_block(column_epoch, &chain.spec); - // We lock the cache briefly to get or insert a OnceCell, then drop the lock - // before doing proposer shuffling calculation via `OnceCell::get_or_try_init`. This avoids - // holding the lock during the computation, while still ensuring the result is cached and - // initialised only once. - // - // This approach exposes the cache internals (`OnceCell` & `EpochBlockProposers`) - // as a trade-off for avoiding lock contention. 
- let epoch_proposers_cell = chain - .beacon_proposer_cache - .lock() - .get_or_insert_key(column_epoch, proposer_shuffling_root); - - let epoch_proposers = epoch_proposers_cell.get_or_try_init(move || { - debug!( - %block_root, - index = %column_index, - "Proposer shuffling cache miss for column verification" - ); - let (parent_state_root, mut parent_state) = chain - .store - .get_advanced_hot_state(block_parent_root, column_slot, parent_block.state_root) - .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? - .ok_or_else(|| { - BeaconChainError::DBInconsistent(format!( - "Missing state for parent block {block_parent_root:?}", - )) - })?; - - let state = cheap_state_advance_to_obtain_committees::<_, GossipDataColumnError>( - &mut parent_state, - Some(parent_state_root), - column_slot, - &chain.spec, - )?; - - let epoch = state.current_epoch(); - let proposers = state.get_beacon_proposer_indices(epoch, &chain.spec)?; - // Prime the proposer shuffling cache with the newly-learned value. - Ok::<_, GossipDataColumnError>(EpochBlockProposers { - epoch: column_epoch, - fork: state.fork(), - proposers: proposers.into(), - }) - })?; - - let proposer_index = *epoch_proposers - .proposers - .get(column_slot.as_usize() % slots_per_epoch as usize) - .ok_or_else(|| BeaconChainError::NoProposerForSlot(column_slot))?; - - let fork = epoch_proposers.fork; + let proposer = chain.with_proposer_cache( + proposer_shuffling_root, + column_epoch, + |proposers| proposers.get_slot::(column_slot), + || { + debug!( + %block_root, + index = %column_index, + "Proposer shuffling cache miss for column verification" + ); + chain + .store + .get_advanced_hot_state(block_parent_root, column_slot, parent_block.state_root) + .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? 
+ .ok_or_else(|| { + GossipDataColumnError::BeaconChainError(Box::new( + BeaconChainError::DBInconsistent(format!( + "Missing state for parent block {block_parent_root:?}", + )), + )) + }) + }, + )?; + let proposer_index = proposer.index; + let fork = proposer.fork; // Signature verify the signed block header. let signature_is_valid = { @@ -816,16 +800,22 @@ pub fn observe_gossip_data_column( #[cfg(test)] mod test { use crate::data_column_verification::{ - GossipDataColumnError, validate_data_column_sidecar_for_gossip, + GossipDataColumnError, GossipVerifiedDataColumn, validate_data_column_sidecar_for_gossip, }; use crate::observed_data_sidecars::Observe; - use crate::test_utils::BeaconChainHarness; + use crate::test_utils::{ + BeaconChainHarness, EphemeralHarnessType, generate_data_column_sidecars_from_block, + }; + use eth2::types::BlobsBundle; + use execution_layer::test_utils::generate_blobs; + use std::sync::Arc; use types::{DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkName, MainnetEthSpec}; type E = MainnetEthSpec; #[tokio::test] - async fn empty_data_column_sidecars_fails_validation() { + async fn test_validate_data_column_sidecar_for_gossip() { + // Setting up harness is slow, we initialise once and use it for all gossip validation tests. 
let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec.into()) @@ -835,6 +825,44 @@ mod test { .build(); harness.advance_slot(); + let verify_fn = |column_sidecar: DataColumnSidecar| { + let col_index = column_sidecar.index; + validate_data_column_sidecar_for_gossip::<_, Observe>( + column_sidecar.into(), + DataColumnSubnetId::from_column_index(col_index, &harness.spec), + &harness.chain, + ) + }; + empty_data_column_sidecars_fails_validation(&harness, &verify_fn).await; + data_column_sidecar_commitments_exceed_max_blobs_per_block(&harness, &verify_fn).await; + } + + #[tokio::test] + async fn test_new_for_block_publishing() { + // Setting up harness is slow, we initialise once and use it for all gossip validation tests. + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + harness.advance_slot(); + + let verify_fn = |column_sidecar: DataColumnSidecar| { + GossipVerifiedDataColumn::<_>::new_for_block_publishing( + column_sidecar.into(), + &harness.chain, + ) + }; + empty_data_column_sidecars_fails_validation(&harness, &verify_fn).await; + data_column_sidecar_commitments_exceed_max_blobs_per_block(&harness, &verify_fn).await; + } + + async fn empty_data_column_sidecars_fails_validation( + harness: &BeaconChainHarness>, + verify_fn: &impl Fn(DataColumnSidecar) -> Result, + ) { let slot = harness.get_current_slot(); let state = harness.get_current_state(); let ((block, _blobs_opt), _state) = harness @@ -857,14 +885,47 @@ mod test { .unwrap(), }; - let result = validate_data_column_sidecar_for_gossip::<_, Observe>( - column_sidecar.into(), - DataColumnSubnetId::from_column_index(index, &harness.spec), - &harness.chain, - ); + let result = verify_fn(column_sidecar); assert!(matches!( result.err(), 
Some(GossipDataColumnError::UnexpectedDataColumn) )); } + + async fn data_column_sidecar_commitments_exceed_max_blobs_per_block( + harness: &BeaconChainHarness>, + verify_fn: &impl Fn(DataColumnSidecar) -> Result, + ) { + let slot = harness.get_current_slot(); + let epoch = slot.epoch(E::slots_per_epoch()); + let state = harness.get_current_state(); + let max_blobs_per_block = harness.spec.max_blobs_per_block(epoch) as usize; + let fork = harness.spec.fork_name_at_epoch(epoch); + + // Generate data column sidecar with blob count exceeding max_blobs_per_block. + let blob_count = max_blobs_per_block + 1; + let BlobsBundle:: { + commitments: preloaded_commitments_single, + proofs: _, + blobs: _, + } = generate_blobs(1, fork).unwrap().0; + + let ((block, _blobs_opt), _state) = harness + .make_block_with_modifier(state, slot, |block| { + *block.body_mut().blob_kzg_commitments_mut().unwrap() = + vec![preloaded_commitments_single[0]; blob_count].into(); + }) + .await; + + let column_sidecar = generate_data_column_sidecars_from_block(&block, &harness.spec) + .into_iter() + .next() + .unwrap(); + + let result = verify_fn(Arc::try_unwrap(column_sidecar).unwrap()); + assert!(matches!( + result.err(), + Some(GossipDataColumnError::MaxBlobsPerBlockExceeded { .. 
}) + )); + } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index a1a0ec74f6..7b04a36fae 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -230,6 +230,23 @@ pub enum BeaconChainError { columns_found: usize, }, FailedToReconstructBlobs(String), + ProposerCacheIncorrectState { + state_decision_block_root: Hash256, + requested_decision_block_root: Hash256, + }, + ProposerCacheAccessorFailure { + decision_block_root: Hash256, + proposal_epoch: Epoch, + }, + ProposerCacheOutOfBounds { + slot: Slot, + epoch: Epoch, + }, + ProposerCacheWrongEpoch { + request_epoch: Epoch, + cache_epoch: Epoch, + }, + SkipProposerPreparation, } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 8b9fb5e354..15e0a55cf5 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -140,7 +140,7 @@ impl BeaconChain { // Store the blobs or data columns too if let Some(op) = self - .get_blobs_or_columns_store_op(block_root, block_data) + .get_blobs_or_columns_store_op(block_root, block.slot(), block_data) .map_err(|e| { HistoricalBlockError::StoreError(StoreError::DBError { message: format!("get_blobs_or_columns_store_op error {e:?}"), diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 2147ed5966..382775ab50 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -174,6 +174,13 @@ pub fn blobs_to_data_column_sidecars( let kzg_commitments_inclusion_proof = block.message().body().kzg_commitments_merkle_proof()?; let signed_block_header = block.signed_block_header(); + if cell_proofs.len() != blobs.len() * E::number_of_columns() { + return Err(DataColumnSidecarError::InvalidCellProofLength { + expected: blobs.len() * 
E::number_of_columns(), + actual: cell_proofs.len(), + }); + } + let proof_chunks = cell_proofs .chunks_exact(E::number_of_columns()) .collect::>(); @@ -292,6 +299,8 @@ pub(crate) fn build_data_column_sidecars( /// /// If `blob_indices_opt` is `None`, this function attempts to reconstruct all blobs associated /// with the block. +/// This function does NOT use rayon as this is primarily used by a non critical path in HTTP API +/// and it will be slow if the node needs to reconstruct the blobs pub fn reconstruct_blobs( kzg: &Kzg, data_columns: &[Arc>], @@ -313,7 +322,7 @@ pub fn reconstruct_blobs( }; let blob_sidecars = blob_indices - .into_par_iter() + .into_iter() .map(|row_index| { let mut cells: Vec = vec![]; let mut cell_ids: Vec = vec![]; @@ -330,16 +339,26 @@ pub fn reconstruct_blobs( cell_ids.push(data_column.index); } - let (cells, _kzg_proofs) = kzg - .recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) - .map_err(|e| format!("Failed to recover cells and compute KZG proofs: {e:?}"))?; + let num_cells_original_blob = E::number_of_columns() / 2; + let blob_bytes = if data_columns.len() < E::number_of_columns() { + let (recovered_cells, _kzg_proofs) = kzg + .recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) + .map_err(|e| { + format!("Failed to recover cells and compute KZG proofs: {e:?}") + })?; - let num_cells_original_blob = cells.len() / 2; - let blob_bytes = cells - .into_iter() - .take(num_cells_original_blob) - .flat_map(|cell| cell.into_iter()) - .collect(); + recovered_cells + .into_iter() + .take(num_cells_original_blob) + .flat_map(|cell| cell.into_iter()) + .collect() + } else { + cells + .into_iter() + .take(num_cells_original_blob) + .flat_map(|cell| (*cell).into_iter()) + .collect() + }; let blob = Blob::::new(blob_bytes).map_err(|e| format!("{e:?}"))?; let kzg_proof = KzgProof::empty(); diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 1b57bad104..0d34ffdcd1 100644 --- 
a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -458,12 +458,6 @@ pub static BEACON_EARLY_ATTESTER_CACHE_HITS: LazyLock> = Lazy ) }); -pub static BEACON_REQRESP_PRE_IMPORT_CACHE_SIZE: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "beacon_reqresp_pre_import_cache_size", - "Current count of items of the reqresp pre import cache", - ) -}); pub static BEACON_REQRESP_PRE_IMPORT_CACHE_HITS: LazyLock> = LazyLock::new(|| { try_create_int_counter( @@ -578,6 +572,14 @@ pub static FORK_CHOICE_READ_LOCK_AQUIRE_TIMES: LazyLock> = Laz exponential_buckets(1e-4, 4.0, 7), ) }); +pub static FORK_CHOICE_UPGRADABLE_READ_LOCK_AQUIRE_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_fork_choice_upgradable_read_lock_aquire_seconds", + "Time taken to aquire the fork-choice upgradable read lock", + exponential_buckets(1e-4, 4.0, 7), + ) + }); pub static FORK_CHOICE_WRITE_LOCK_AQUIRE_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( "beacon_fork_choice_write_lock_aquire_seconds", @@ -1957,7 +1959,6 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { } let attestation_stats = beacon_chain.op_pool.attestation_stats(); - let chain_metrics = beacon_chain.metrics(); // Kept duplicated for backwards compatibility set_gauge_by_usize( @@ -1965,11 +1966,6 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { beacon_chain.store.state_cache_len(), ); - set_gauge_by_usize( - &BEACON_REQRESP_PRE_IMPORT_CACHE_SIZE, - chain_metrics.reqresp_pre_import_cache_len, - ); - let da_checker_metrics = beacon_chain.data_availability_checker.metrics(); set_gauge_by_usize( &DATA_AVAILABILITY_OVERFLOW_MEMORY_BLOCK_CACHE_SIZE, diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 27c2c7c0a1..87348cb01b 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ 
-33,7 +33,7 @@ use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, Relative /// /// This avoids doing unnecessary work whilst the node is syncing or has perhaps been put to sleep /// for some period of time. -const MAX_ADVANCE_DISTANCE: u64 = 4; +const MAX_ADVANCE_DISTANCE: u64 = 256; /// Similarly for fork choice: avoid the fork choice lookahead during sync. /// @@ -49,17 +49,7 @@ enum Error { HeadMissingFromSnapshotCache(#[allow(dead_code)] Hash256), BeaconState(#[allow(dead_code)] BeaconStateError), Store(#[allow(dead_code)] store::Error), - MaxDistanceExceeded { - current_slot: Slot, - head_slot: Slot, - }, - StateAlreadyAdvanced { - block_root: Hash256, - }, - BadStateSlot { - _state_slot: Slot, - _block_slot: Slot, - }, + MaxDistanceExceeded { current_slot: Slot, head_slot: Slot }, } impl From for Error { @@ -180,9 +170,6 @@ async fn state_advance_timer( error = ?e, "Failed to advance head state" ), - Err(Error::StateAlreadyAdvanced { block_root }) => { - debug!(?block_root, "State already advanced on slot") - } Err(Error::MaxDistanceExceeded { current_slot, head_slot, @@ -295,25 +282,6 @@ fn advance_head(beacon_chain: &Arc>) -> Resu .get_advanced_hot_state(head_block_root, current_slot, head_block_state_root)? .ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?; - // Protect against advancing a state more than a single slot. - // - // Advancing more than one slot without storing the intermediate state would corrupt the - // database. Future works might store intermediate states inside this function. 
- match state.slot().cmp(&state.latest_block_header().slot) { - std::cmp::Ordering::Equal => (), - std::cmp::Ordering::Greater => { - return Err(Error::StateAlreadyAdvanced { - block_root: head_block_root, - }); - } - std::cmp::Ordering::Less => { - return Err(Error::BadStateSlot { - _block_slot: state.latest_block_header().slot, - _state_slot: state.slot(), - }); - } - } - let initial_slot = state.slot(); let initial_epoch = state.current_epoch(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c2230ba057..1d57550156 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -3380,7 +3380,7 @@ pub fn generate_rand_block_and_data_columns( } /// Generate data column sidecars from pre-computed cells and proofs. -fn generate_data_column_sidecars_from_block( +pub fn generate_data_column_sidecars_from_block( block: &SignedBeaconBlock, spec: &ChainSpec, ) -> DataColumnSidecarList { diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs index 1c89624f9d..3ab76828c9 100644 --- a/beacon_node/beacon_chain/src/validator_custody.rs +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -130,7 +130,7 @@ pub struct CustodyContext { /// and enr values. validator_custody_count: AtomicU64, /// Is the node run as a supernode based on current cli parameters. - pub current_is_supernode: bool, + current_is_supernode: bool, /// The persisted value for `is_supernode` based on the previous run of this node. /// /// Note: We require this value because if a user restarts the node with a higher cli custody @@ -307,6 +307,14 @@ impl CustodyContext { .expect("should compute node sampling size from valid chain spec") } + /// Returns whether the node should attempt reconstruction at a given epoch. 
+ pub fn should_attempt_reconstruction(&self, epoch: Epoch, spec: &ChainSpec) -> bool { + let min_columns_for_reconstruction = E::number_of_columns() / 2; + // performing reconstruction is not necessary if sampling column count is exactly 50%, + // because the node doesn't need the remaining columns. + self.num_of_data_columns_to_sample(epoch, spec) > min_columns_for_reconstruction + } + /// Returns the ordered list of column indices that should be sampled for data availability checking at the given epoch. /// /// # Parameters diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 23f1a7d430..00c30e5ab1 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -497,7 +497,7 @@ impl ValidatorMonitor { }); // Add missed non-finalized blocks for the monitored validators - self.add_validators_missed_blocks(state); + self.add_validators_missed_blocks(state, spec); self.process_unaggregated_attestations(state, spec); // Update metrics for individual validators. @@ -588,7 +588,7 @@ impl ValidatorMonitor { } /// Add missed non-finalized blocks for the monitored validators - fn add_validators_missed_blocks(&mut self, state: &BeaconState) { + fn add_validators_missed_blocks(&mut self, state: &BeaconState, spec: &ChainSpec) { // Define range variables let current_slot = state.slot(); let current_epoch = current_slot.epoch(E::slots_per_epoch()); @@ -616,8 +616,8 @@ impl ValidatorMonitor { if block_root == prev_block_root { let slot_epoch = slot.epoch(E::slots_per_epoch()); - if let Ok(shuffling_decision_block) = - state.proposer_shuffling_decision_root_at_epoch(slot_epoch, *block_root) + if let Ok(shuffling_decision_block) = state + .proposer_shuffling_decision_root_at_epoch(slot_epoch, *block_root, spec) { // Update the cache if it has not yet been initialised, or if it is // initialised for a prior epoch. 
This is an optimisation to avoid bouncing diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 58ca4a032e..47f5be02cb 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -42,7 +42,10 @@ enum DataSidecars { } async fn get_chain_segment() -> (Vec>, Vec>>) { - let harness = get_harness(VALIDATOR_COUNT); + // The assumption that you can re-import a block based on what you have in your DB + // is no longer true, as fullnodes stores less than what they sample. + // We use a supernode here to build a chain segment. + let harness = get_harness(VALIDATOR_COUNT, true); harness .extend_chain( @@ -101,7 +104,10 @@ async fn get_chain_segment() -> (Vec>, Vec BeaconChainHarness> { +fn get_harness( + validator_count: usize, + supernode: bool, +) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .chain_config(ChainConfig { @@ -109,6 +115,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness( #[tokio::test] async fn chain_segment_full_segment() { - let harness = get_harness(VALIDATOR_COUNT); + let harness = get_harness(VALIDATOR_COUNT, false); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() @@ -290,7 +297,7 @@ async fn chain_segment_full_segment() { #[tokio::test] async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { - let harness = get_harness(VALIDATOR_COUNT); + let harness = get_harness(VALIDATOR_COUNT, false); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() @@ -322,7 +329,7 @@ async fn chain_segment_varying_chunk_size() { #[tokio::test] async fn chain_segment_non_linear_parent_roots() { - let harness = 
get_harness(VALIDATOR_COUNT); + let harness = get_harness(VALIDATOR_COUNT, false); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; harness @@ -379,7 +386,7 @@ async fn chain_segment_non_linear_parent_roots() { #[tokio::test] async fn chain_segment_non_linear_slots() { - let harness = get_harness(VALIDATOR_COUNT); + let harness = get_harness(VALIDATOR_COUNT, false); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; harness .chain @@ -521,7 +528,7 @@ async fn assert_invalid_signature( async fn get_invalid_sigs_harness( chain_segment: &[BeaconSnapshot], ) -> BeaconChainHarness> { - let harness = get_harness(VALIDATOR_COUNT); + let harness = get_harness(VALIDATOR_COUNT, false); harness .chain .slot_clock @@ -979,7 +986,7 @@ fn unwrap_err(result: Result) -> U { #[tokio::test] async fn block_gossip_verification() { - let harness = get_harness(VALIDATOR_COUNT); + let harness = get_harness(VALIDATOR_COUNT, false); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; @@ -1382,7 +1389,7 @@ async fn verify_block_for_gossip_slashing_detection() { #[tokio::test] async fn verify_block_for_gossip_doppelganger_detection() { - let harness = get_harness(VALIDATOR_COUNT); + let harness = get_harness(VALIDATOR_COUNT, false); let state = harness.get_current_state(); let ((block, _), _) = harness.make_block(state.clone(), Slot::new(1)).await; @@ -1730,6 +1737,8 @@ async fn add_altair_block_to_base_chain() { )); } +// This is a regression test for this bug: +// https://github.com/sigp/lighthouse/issues/4332#issuecomment-1565092279 #[tokio::test] async fn import_duplicate_block_unrealized_justification() { let spec = MainnetEthSpec::default_spec(); @@ -1791,7 +1800,7 @@ async fn import_duplicate_block_unrealized_justification() { .await .unwrap(); - // Unrealized justification should NOT have updated. 
+ // The store's global unrealized justification should update immediately and match the block. let unrealized_justification = { let fc = chain.canonical_head.fork_choice_read_lock(); assert_eq!(fc.justified_checkpoint().epoch, 0); @@ -1808,9 +1817,12 @@ async fn import_duplicate_block_unrealized_justification() { }; // Import the second verified block, simulating a block processed via RPC. - import_execution_pending_block(chain.clone(), verified_block2) - .await - .unwrap(); + assert_eq!( + import_execution_pending_block(chain.clone(), verified_block2) + .await + .unwrap_err(), + format!("DuplicateFullyImported({block_root})") + ); // Unrealized justification should still be updated. let fc3 = chain.canonical_head.fork_choice_read_lock(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index fbb592b510..449b5dd043 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1191,6 +1191,316 @@ fn check_shuffling_compatible( } } +/// These tests check the consistency of: +/// +/// - ProtoBlock::proposer_shuffling_root_for_child_block, and +/// - BeaconState::proposer_shuffling_decision_root{_at_epoch} +async fn proposer_shuffling_root_consistency_test( + spec: ChainSpec, + parent_slot: u64, + child_slot: u64, +) { + let child_slot = Slot::new(child_slot); + let db_path = tempdir().unwrap(); + let store = get_store_generic(&db_path, Default::default(), spec.clone()); + let validators_keypairs = + types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); + let harness = TestHarness::builder(MinimalEthSpec) + .spec(spec.into()) + .keypairs(validators_keypairs) + .fresh_disk_store(store) + .mock_execution_layer() + .build(); + let spec = &harness.chain.spec; + + // Build chain out to parent block. 
+ let initial_slots: Vec = (1..=parent_slot).map(Into::into).collect(); + let (state, state_root) = harness.get_current_state_and_root(); + let all_validators = harness.get_all_validators(); + let (_, _, parent_root, _) = harness + .add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators) + .await; + + // Add the child block. + let (state, state_root) = harness.get_current_state_and_root(); + let all_validators = harness.get_all_validators(); + let (_, _, child_root, child_block_state) = harness + .add_attested_blocks_at_slots(state, state_root, &[child_slot], &all_validators) + .await; + + let child_block_epoch = child_slot.epoch(E::slots_per_epoch()); + + // Load parent block from fork choice. + let fc_parent = harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_block(&parent_root.into()) + .unwrap(); + + // The proposer shuffling decision root computed using fork choice should equal the root + // computed from the child state. + let decision_root = fc_parent.proposer_shuffling_root_for_child_block(child_block_epoch, spec); + + assert_eq!( + decision_root, + child_block_state + .proposer_shuffling_decision_root(child_root.into(), spec) + .unwrap() + ); + assert_eq!( + decision_root, + child_block_state + .proposer_shuffling_decision_root_at_epoch(child_block_epoch, child_root.into(), spec) + .unwrap() + ); + + // The passed block root argument should be irrelevant for all blocks except the genesis block. 
+ assert_eq!( + decision_root, + child_block_state + .proposer_shuffling_decision_root(Hash256::ZERO, spec) + .unwrap() + ); + assert_eq!( + decision_root, + child_block_state + .proposer_shuffling_decision_root_at_epoch(child_block_epoch, Hash256::ZERO, spec) + .unwrap() + ); +} + +#[tokio::test] +async fn proposer_shuffling_root_consistency_same_epoch() { + let spec = test_spec::(); + proposer_shuffling_root_consistency_test(spec, 32, 39).await; +} + +#[tokio::test] +async fn proposer_shuffling_root_consistency_next_epoch() { + let spec = test_spec::(); + proposer_shuffling_root_consistency_test(spec, 32, 47).await; +} + +#[tokio::test] +async fn proposer_shuffling_root_consistency_two_epochs() { + let spec = test_spec::(); + proposer_shuffling_root_consistency_test(spec, 32, 55).await; +} + +#[tokio::test] +async fn proposer_shuffling_root_consistency_at_fork_boundary() { + let mut spec = ForkName::Electra.make_genesis_spec(E::default_spec()); + spec.fulu_fork_epoch = Some(Epoch::new(4)); + + // Parent block in epoch prior to Fulu fork epoch, child block in Fulu fork epoch. + proposer_shuffling_root_consistency_test( + spec.clone(), + 3 * E::slots_per_epoch(), + 4 * E::slots_per_epoch(), + ) + .await; + + // Parent block and child block in Fulu fork epoch. + proposer_shuffling_root_consistency_test( + spec.clone(), + 4 * E::slots_per_epoch(), + 4 * E::slots_per_epoch() + 1, + ) + .await; + + // Parent block in Fulu fork epoch and child block in epoch after. + proposer_shuffling_root_consistency_test( + spec.clone(), + 4 * E::slots_per_epoch(), + 5 * E::slots_per_epoch(), + ) + .await; + + // Parent block in epoch prior and child block in epoch after. 
+ proposer_shuffling_root_consistency_test( + spec, + 3 * E::slots_per_epoch(), + 5 * E::slots_per_epoch(), + ) + .await; +} + +#[tokio::test] +async fn proposer_shuffling_changing_with_lookahead() { + let initial_blocks = E::slots_per_epoch() * 4 - 1; + + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let db_path = tempdir().unwrap(); + let store = get_store_generic(&db_path, Default::default(), spec.clone()); + let validators_keypairs = + types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); + let harness = TestHarness::builder(MinimalEthSpec) + .spec(spec.into()) + .keypairs(validators_keypairs) + .fresh_disk_store(store) + .mock_execution_layer() + .build(); + let spec = &harness.chain.spec; + + // Start with some blocks, finishing with one slot before a new epoch. + harness.advance_slot(); + harness + .extend_chain( + initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let pre_deposit_state = harness.get_current_state(); + assert_eq!(pre_deposit_state.slot(), initial_blocks); + let topup_block_slot = Slot::new(initial_blocks + 1); + let validator_to_topup_index = 1; + let validator_to_topup = pre_deposit_state + .get_validator(validator_to_topup_index) + .unwrap() + .clone(); + + // Craft a block with a deposit request and consolidation. + // XXX: This is a really nasty way to do this, but we need better test facilities in + // MockExecutionLayer to address this. 
+ let deposit_request: DepositRequest = DepositRequest { + index: pre_deposit_state.eth1_deposit_index(), + pubkey: validator_to_topup.pubkey, + withdrawal_credentials: validator_to_topup.withdrawal_credentials, + amount: 63_000_000_000, + signature: SignatureBytes::empty(), + }; + + let consolidation_request: ConsolidationRequest = ConsolidationRequest { + source_address: validator_to_topup + .get_execution_withdrawal_address(spec) + .unwrap(), + source_pubkey: validator_to_topup.pubkey, + target_pubkey: validator_to_topup.pubkey, + }; + + let execution_requests = ExecutionRequests:: { + deposits: VariableList::new(vec![deposit_request]).unwrap(), + withdrawals: vec![].into(), + consolidations: VariableList::new(vec![consolidation_request]).unwrap(), + }; + + let mut block = Box::pin(harness.make_block_with_modifier( + pre_deposit_state.clone(), + topup_block_slot, + |block| *block.body_mut().execution_requests_mut().unwrap() = execution_requests, + )) + .await + .0; + + let Err(BlockError::StateRootMismatch { + local: true_state_root, + .. + }) = harness + .process_block(topup_block_slot, block.0.canonical_root(), block.clone()) + .await + else { + panic!("state root should not match due to pending deposits changes/etc"); + }; + let mut new_block = block.0.message_fulu().unwrap().clone(); + new_block.state_root = true_state_root; + block.0 = Arc::new(harness.sign_beacon_block(new_block.into(), &pre_deposit_state)); + + harness + .process_block(topup_block_slot, block.0.canonical_root(), block.clone()) + .await + .unwrap(); + + // Advance two epochs to finalize the deposit and process it. + // Start with just a single epoch advance so we can grab the state one epoch prior to where + // we end up. + harness.advance_slot(); + harness + .extend_chain( + E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Grab the epoch start state. 
This is the state from which the proposers at the next epoch were + // computed. + let prev_epoch_state = harness.get_current_state(); + assert_eq!(prev_epoch_state.slot() % E::slots_per_epoch(), 0); + + // The deposit should be pending. + let pending_deposits = prev_epoch_state.pending_deposits().unwrap(); + assert_eq!(pending_deposits.len(), 1, "{pending_deposits:?}"); + + // Advance the 2nd epoch to finalize the deposit and process it. + harness.advance_slot(); + harness + .extend_chain( + E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let current_epoch_state = harness.get_current_state(); + assert_eq!(current_epoch_state.slot() % E::slots_per_epoch(), 0); + + // Deposit is processed! + let pending_deposits = current_epoch_state.pending_deposits().unwrap(); + assert_eq!(pending_deposits.len(), 0, "{pending_deposits:?}"); + + let validator = current_epoch_state + .get_validator(validator_to_topup_index) + .unwrap(); + assert!(validator.has_compounding_withdrawal_credential(spec)); + assert_eq!(validator.effective_balance, 95_000_000_000); + + // The shuffling for the current epoch from `prev_epoch_state` should match the shuffling + // for the current epoch from `current_epoch_state` because we should be correctly using the + // stored lookahead. + let current_epoch = current_epoch_state.current_epoch(); + let proposer_shuffling = prev_epoch_state + .get_beacon_proposer_indices(current_epoch, spec) + .unwrap(); + + assert_eq!( + proposer_shuffling, + current_epoch_state + .get_beacon_proposer_indices(current_epoch, spec) + .unwrap() + ); + + // If we bypass the safety checks in `get_proposer_indices`, we should see that the shuffling + // differs due to the effective balance change. 
+ let unsafe_get_proposer_indices = |state: &BeaconState, epoch| -> Vec { + let indices = state.get_active_validator_indices(epoch, spec).unwrap(); + let preimage = state.get_seed(epoch, Domain::BeaconProposer, spec).unwrap(); + epoch + .slot_iter(E::slots_per_epoch()) + .map(|slot| { + let mut preimage = preimage.to_vec(); + preimage.append(&mut int_to_bytes::int_to_bytes8(slot.as_u64())); + let seed = ethereum_hashing::hash(&preimage); + state.compute_proposer_index(&indices, &seed, spec).unwrap() + }) + .collect() + }; + + // The unsafe function is correct when used with lookahead. + assert_eq!( + unsafe_get_proposer_indices(&prev_epoch_state, current_epoch), + proposer_shuffling + ); + + // Computing the shuffling for current epoch without lookahead is WRONG. + assert_ne!( + unsafe_get_proposer_indices(¤t_epoch_state, current_epoch), + proposer_shuffling, + ); +} + // Ensure blocks from abandoned forks are pruned from the Hot DB #[tokio::test] async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { @@ -2425,6 +2735,14 @@ async fn weak_subjectivity_sync_test( .rng(Box::new(StdRng::seed_from_u64(42))) .build() .expect("should build"); + beacon_chain + .data_availability_checker + .custody_context() + .init_ordered_data_columns_from_custody_groups( + (0..spec.number_of_custody_groups).collect(), + &spec, + ) + .unwrap(); let beacon_chain = Arc::new(beacon_chain); let wss_block_root = wss_block.canonical_root(); @@ -3827,6 +4145,88 @@ async fn replay_from_split_state() { assert_eq!(state.slot(), split.slot); } +/// Test that regular nodes filter and store only custody columns when processing blocks with data columns. 
+#[tokio::test] +async fn test_custody_column_filtering_regular_node() { + // Skip test if PeerDAS is not scheduled + if !test_spec::().is_peer_das_scheduled() { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Generate a block with data columns + harness.execution_block_generator().set_min_blob_count(1); + let current_slot = harness.get_current_slot(); + let block_root = harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Get custody columns for this epoch - regular nodes only store a subset + let expected_custody_columns: HashSet<_> = harness + .chain + .custody_columns_for_epoch(Some(current_slot.epoch(E::slots_per_epoch()))) + .iter() + .copied() + .collect(); + + // Check what actually got stored in the database + let stored_column_indices: HashSet<_> = store + .get_data_column_keys(block_root) + .expect("should get stored column keys") + .into_iter() + .collect(); + + assert_eq!( + stored_column_indices, expected_custody_columns, + "Regular node should only store custody columns" + ); +} + +/// Test that supernodes store all data columns when processing blocks with data columns. 
+#[tokio::test] +async fn test_custody_column_filtering_supernode() { + // Skip test if PeerDAS is not scheduled + if !test_spec::().is_peer_das_scheduled() { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); + + // Generate a block with data columns + harness.execution_block_generator().set_min_blob_count(1); + let block_root = harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Supernodes are expected to store all data columns + let expected_custody_columns: HashSet<_> = (0..E::number_of_columns() as u64).collect(); + + // Check what actually got stored in the database + let stored_column_indices: HashSet<_> = store + .get_data_column_keys(block_root) + .expect("should get stored column keys") + .into_iter() + .collect(); + + assert_eq!( + stored_column_indices, expected_custody_columns, + "Supernode should store all custody columns" + ); +} + /// Checks that two chains are the same, for the purpose of these tests. /// /// Several fields that are hard/impossible to check are ignored (e.g., the store). diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 4e2554d3d8..95732abeb5 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -3,7 +3,7 @@ use beacon_chain::test_utils::{ }; use beacon_chain::validator_monitor::{MISSED_BLOCK_LAG_SLOTS, ValidatorMonitorConfig}; use std::sync::LazyLock; -use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 48; @@ -74,7 +74,7 @@ async fn missed_blocks_across_epochs() { .get_hot_state(state_roots_by_slot[&start_slot]) .unwrap(); let decision_root = state - .proposer_shuffling_decision_root(genesis_block_root) + .proposer_shuffling_decision_root(genesis_block_root, &harness.chain.spec) .unwrap(); proposer_shuffling_cache .insert( @@ -152,7 +152,7 @@ async fn missed_blocks_basic() { .unwrap(); let mut missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; let mut proposer_shuffling_decision_root = _state - .proposer_shuffling_decision_root(duplicate_block_root) + .proposer_shuffling_decision_root(duplicate_block_root, &harness1.chain.spec) .unwrap(); let beacon_proposer_cache = harness1 @@ -235,17 +235,20 @@ async fn missed_blocks_basic() { // Let's fill the cache with the proposers for the current epoch // and push the duplicate_block_root to the block_roots vector assert_eq!( - beacon_proposer_cache.lock().insert( - epoch, - duplicate_block_root, - validator_indexes.clone(), - _state2.fork() - ), + _state2.set_block_root(prev_slot, duplicate_block_root), Ok(()) ); + let decision_block_root = _state2 + .proposer_shuffling_decision_root_at_epoch(epoch, Hash256::ZERO, &harness2.chain.spec) + .unwrap(); assert_eq!( - _state2.set_block_root(prev_slot, duplicate_block_root), + beacon_proposer_cache.lock().insert( + epoch, + decision_block_root, + validator_indexes.clone(), + _state2.fork() + ), Ok(()) ); @@ -326,7 +329,11 @@ async fn missed_blocks_basic() { .unwrap(); missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; proposer_shuffling_decision_root = _state3 - .proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root) + .proposer_shuffling_decision_root_at_epoch( + epoch, + duplicate_block_root, + &harness1.chain.spec, + ) .unwrap(); let beacon_proposer_cache = harness3 diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index ab9ab045f4..28ed0cca91 
100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -59,7 +59,7 @@ use std::sync::Arc; use std::task::Context; use std::time::{Duration, Instant}; use strum::IntoStaticStr; -use task_executor::TaskExecutor; +use task_executor::{RayonPoolType, TaskExecutor}; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; use tracing::{debug, error, trace, warn}; @@ -181,7 +181,7 @@ impl BeaconProcessorQueueLengths { // We don't request more than `PARENT_DEPTH_TOLERANCE` (32) lookups, so we can limit // this queue size. With 48 max blobs per block, each column sidecar list could be up to 12MB. rpc_custody_column_queue: 64, - column_reconstruction_queue: 64, + column_reconstruction_queue: 1, chain_segment_queue: 64, backfill_chain_segment: 64, gossip_block_queue: 1024, @@ -603,7 +603,7 @@ pub enum Work { process_fn: BlockingFn, }, ChainSegment(AsyncFn), - ChainSegmentBackfill(AsyncFn), + ChainSegmentBackfill(BlockingFn), Status(BlockingFn), BlocksByRangeRequest(AsyncFn), BlocksByRootsRequest(AsyncFn), @@ -867,7 +867,7 @@ impl BeaconProcessor { let mut rpc_blob_queue = FifoQueue::new(queue_lengths.rpc_blob_queue); let mut rpc_custody_column_queue = FifoQueue::new(queue_lengths.rpc_custody_column_queue); let mut column_reconstruction_queue = - FifoQueue::new(queue_lengths.column_reconstruction_queue); + LifoQueue::new(queue_lengths.column_reconstruction_queue); let mut chain_segment_queue = FifoQueue::new(queue_lengths.chain_segment_queue); let mut backfill_chain_segment = FifoQueue::new(queue_lengths.backfill_chain_segment); let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); @@ -1354,9 +1354,7 @@ impl BeaconProcessor { Work::RpcCustodyColumn { .. } => { rpc_custody_column_queue.push(work, work_id) } - Work::ColumnReconstruction(_) => { - column_reconstruction_queue.push(work, work_id) - } + Work::ColumnReconstruction(_) => column_reconstruction_queue.push(work), Work::ChainSegment { .. 
} => chain_segment_queue.push(work, work_id), Work::ChainSegmentBackfill { .. } => { backfill_chain_segment.push(work, work_id) @@ -1605,7 +1603,14 @@ impl BeaconProcessor { Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { task_spawner.spawn_async(work) } - Work::ChainSegmentBackfill(process_fn) => task_spawner.spawn_async(process_fn), + Work::ChainSegmentBackfill(process_fn) => { + if self.config.enable_backfill_rate_limiting { + task_spawner.spawn_blocking_with_rayon(RayonPoolType::LowPriority, process_fn) + } else { + // use the global rayon thread pool if backfill rate limiting is disabled. + task_spawner.spawn_blocking(process_fn) + } + } Work::ApiRequestP0(process_fn) | Work::ApiRequestP1(process_fn) => match process_fn { BlockingOrAsync::Blocking(process_fn) => task_spawner.spawn_blocking(process_fn), BlockingOrAsync::Async(process_fn) => task_spawner.spawn_async(process_fn), @@ -1667,6 +1672,21 @@ impl TaskSpawner { WORKER_TASK_NAME, ) } + + /// Spawns a blocking task on a rayon thread pool, dropping the `SendOnDrop` after task completion. + fn spawn_blocking_with_rayon(self, rayon_pool_type: RayonPoolType, task: F) + where + F: FnOnce() + Send + 'static, + { + self.executor.spawn_blocking_with_rayon( + move || { + task(); + drop(self.send_idle_on_drop) + }, + rayon_pool_type, + WORKER_TASK_NAME, + ) + } } /// This struct will send a message on `self.tx` when it is dropped. 
An error will be logged diff --git a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs index 9565e57589..8c33cf5869 100644 --- a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs @@ -37,7 +37,9 @@ const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; const RPC_BLOCKS: &str = "rpc_blocks"; const ATTESTATIONS: &str = "attestations"; +const ATTESTATIONS_PER_ROOT: &str = "attestations_per_root"; const LIGHT_CLIENT_UPDATES: &str = "lc_updates"; +const LIGHT_CLIENT_UPDATES_PER_PARENT_ROOT: &str = "lc_updates_per_parent_root"; /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. /// This is to account for any slight drift in the system clock. @@ -171,7 +173,7 @@ pub struct IgnoredRpcBlock { } /// A backfill batch work that has been queued for processing later. -pub struct QueuedBackfillBatch(pub AsyncFn); +pub struct QueuedBackfillBatch(pub BlockingFn); pub struct QueuedColumnReconstruction { pub block_root: Hash256, @@ -829,10 +831,19 @@ impl ReprocessQueue { ); } - if let Some(queued_atts) = self.awaiting_attestations_per_root.get_mut(&root) - && let Some(index) = queued_atts.iter().position(|&id| id == queued_id) + if let Entry::Occupied(mut queued_atts) = + self.awaiting_attestations_per_root.entry(root) + && let Some(index) = + queued_atts.get().iter().position(|&id| id == queued_id) { - queued_atts.swap_remove(index); + let queued_atts_mut = queued_atts.get_mut(); + queued_atts_mut.swap_remove(index); + + // If the vec is empty after this attestation's removal, we need to delete + // the entry to prevent bloating the hashmap indefinitely. 
+ if queued_atts_mut.is_empty() { + queued_atts.remove_entry(); + } } } } @@ -853,13 +864,19 @@ impl ReprocessQueue { error!("Failed to send scheduled light client optimistic update"); } - if let Some(queued_lc_updates) = self - .awaiting_lc_updates_per_parent_root - .get_mut(&parent_root) - && let Some(index) = - queued_lc_updates.iter().position(|&id| id == queued_id) + if let Entry::Occupied(mut queued_lc_updates) = + self.awaiting_lc_updates_per_parent_root.entry(parent_root) + && let Some(index) = queued_lc_updates + .get() + .iter() + .position(|&id| id == queued_id) { - queued_lc_updates.swap_remove(index); + let queued_lc_updates_mut = queued_lc_updates.get_mut(); + queued_lc_updates_mut.swap_remove(index); + + if queued_lc_updates_mut.is_empty() { + queued_lc_updates.remove_entry(); + } } } } @@ -929,11 +946,21 @@ impl ReprocessQueue { &[ATTESTATIONS], self.attestations_delay_queue.len() as i64, ); + metrics::set_gauge_vec( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, + &[ATTESTATIONS_PER_ROOT], + self.awaiting_attestations_per_root.len() as i64, + ); metrics::set_gauge_vec( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, &[LIGHT_CLIENT_UPDATES], self.lc_updates_delay_queue.len() as i64, ); + metrics::set_gauge_vec( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, + &[LIGHT_CLIENT_UPDATES_PER_PARENT_ROOT], + self.awaiting_lc_updates_per_parent_root.len() as i64, + ); } fn recompute_next_backfill_batch_event(&mut self) { @@ -979,6 +1006,7 @@ impl ReprocessQueue { #[cfg(test)] mod tests { use super::*; + use crate::BeaconProcessorConfig; use logging::create_test_tracing_subscriber; use slot_clock::{ManualSlotClock, TestingSlotClock}; use std::ops::Add; @@ -1056,7 +1084,7 @@ mod tests { // Now queue a backfill sync batch. 
work_reprocessing_tx .try_send(ReprocessQueueMessage::BackfillSync(QueuedBackfillBatch( - Box::pin(async {}), + Box::new(|| {}), ))) .unwrap(); tokio::task::yield_now().await; @@ -1101,4 +1129,97 @@ mod tests { Duration::from_secs(slot_duration), ) } + + fn test_queue() -> ReprocessQueue { + create_test_tracing_subscriber(); + + let config = BeaconProcessorConfig::default(); + let (ready_work_tx, _) = mpsc::channel::(config.max_scheduled_work_queue_len); + let (_, reprocess_work_rx) = + mpsc::channel::(config.max_scheduled_work_queue_len); + let slot_clock = Arc::new(testing_slot_clock(12)); + + ReprocessQueue::new(ready_work_tx, reprocess_work_rx, slot_clock) + } + + // This is a regression test for a memory leak in `awaiting_attestations_per_root`. + // See: https://github.com/sigp/lighthouse/pull/8065 + #[tokio::test] + async fn prune_awaiting_attestations_per_root() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + let beacon_block_root = Hash256::repeat_byte(0xaf); + + // Insert an attestation. + let att = ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate { + beacon_block_root, + process_fn: Box::new(|| {}), + }); + + // Process the event to enter it into the delay queue. + queue.handle_message(InboundEvent::Msg(att)); + + // Check that it is queued. + assert_eq!(queue.awaiting_attestations_per_root.len(), 1); + assert!( + queue + .awaiting_attestations_per_root + .contains_key(&beacon_block_root) + ); + + // Advance time to expire the attestation. + advance_time(&queue.slot_clock, 2 * QUEUED_ATTESTATION_DELAY).await; + let ready_msg = queue.next().await.unwrap(); + assert!(matches!(ready_msg, InboundEvent::ReadyAttestation(_))); + queue.handle_message(ready_msg); + + // The entry for the block root should be gone. 
+ assert!(queue.awaiting_attestations_per_root.is_empty()); + } + + // This is a regression test for a memory leak in `awaiting_lc_updates_per_parent_root`. + // See: https://github.com/sigp/lighthouse/pull/8065 + #[tokio::test] + async fn prune_awaiting_lc_updates_per_parent_root() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + let parent_root = Hash256::repeat_byte(0xaf); + + // Insert an attestation. + let msg = + ReprocessQueueMessage::UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate { + parent_root, + process_fn: Box::new(|| {}), + }); + + // Process the event to enter it into the delay queue. + queue.handle_message(InboundEvent::Msg(msg)); + + // Check that it is queued. + assert_eq!(queue.awaiting_lc_updates_per_parent_root.len(), 1); + assert!( + queue + .awaiting_lc_updates_per_parent_root + .contains_key(&parent_root) + ); + + // Advance time to expire the update. + advance_time(&queue.slot_clock, 2 * QUEUED_LIGHT_CLIENT_UPDATE_DELAY).await; + let ready_msg = queue.next().await.unwrap(); + assert!(matches!(ready_msg, InboundEvent::ReadyLightClientUpdate(_))); + queue.handle_message(ready_msg); + + // The entry for the block root should be gone. + assert!(queue.awaiting_lc_updates_per_parent_root.is_empty()); + } } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index d984d5fedc..02c042bf28 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -412,7 +412,7 @@ where let blobs = if block.message().body().has_blobs() { debug!("Downloading finalized blobs"); if let Some(response) = remote - .get_blobs::(BlockId::Root(block_root), None, &spec) + .get_blob_sidecars::(BlockId::Root(block_root), None, &spec) .await .map_err(|e| format!("Error fetching finalized blobs from remote: {e:?}"))? 
{ diff --git a/beacon_node/client/src/compute_light_client_updates.rs b/beacon_node/client/src/compute_light_client_updates.rs index 44c3475bfe..0ef35588df 100644 --- a/beacon_node/client/src/compute_light_client_updates.rs +++ b/beacon_node/client/src/compute_light_client_updates.rs @@ -3,7 +3,7 @@ use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; use beacon_processor::{BeaconProcessorSend, Work, WorkEvent}; use futures::StreamExt; use futures::channel::mpsc::Receiver; -use tracing::error; +use tracing::{debug, error}; // Each `LightClientProducerEvent` is ~200 bytes. With the light_client server producing only recent // updates it is okay to drop some events in case of overloading. In normal network conditions @@ -27,7 +27,7 @@ pub async fn compute_light_client_updates( chain .recompute_and_cache_light_client_updates(event) .unwrap_or_else(|e| { - error!("error computing light_client updates {:?}", e); + debug!("error computing light_client updates {:?}", e); }); let msg = ReprocessQueueMessage::NewLightClientOptimisticUpdate { parent_root }; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 401646f367..a5fa0f3415 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1914,9 +1914,19 @@ impl ExecutionLayer { ) -> Result, Error> { debug!(?block_root, "Sending block to builder"); if spec.is_fulu_scheduled() { - self.post_builder_blinded_blocks_v2(block_root, block) + let resp = self + .post_builder_blinded_blocks_v2(block_root, block) .await - .map(|()| SubmitBlindedBlockResponse::V2) + .map(|()| SubmitBlindedBlockResponse::V2); + // Fallback to v1 if v2 fails because the relay doesn't support it. + // Note: we should remove the fallback post fulu when all relays have support for v2. 
+ if resp.is_err() { + self.post_builder_blinded_blocks_v1(block_root, block) + .await + .map(|full_payload| SubmitBlindedBlockResponse::V1(Box::new(full_payload))) + } else { + resp + } } else { self.post_builder_blinded_blocks_v1(block_root, block) .await diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index e527e466f6..778067c32b 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -2,15 +2,16 @@ use crate::version::inconsistent_fork_rejection; use crate::{ExecutionOptimistic, state_id::checkpoint_slot_and_execution_optimistic}; use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; -use eth2::types::BlobIndicesQuery; use eth2::types::BlockId as CoreBlockId; use eth2::types::DataColumnIndicesQuery; +use eth2::types::{BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery}; use std::fmt; use std::str::FromStr; use std::sync::Arc; use types::{ BlobSidecarList, DataColumnSidecarList, EthSpec, FixedBytesExtended, ForkName, Hash256, - SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, + SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, UnversionedResponse, + beacon_response::ExecutionOptimisticFinalizedMetadata, }; use warp::Rejection; @@ -352,6 +353,68 @@ impl BlockId { Ok((block, blob_sidecar_list, execution_optimistic, finalized)) } + #[allow(clippy::type_complexity)] + pub fn get_blobs_by_versioned_hashes( + &self, + query: BlobsVersionedHashesQuery, + chain: &BeaconChain, + ) -> Result< + UnversionedResponse>, ExecutionOptimisticFinalizedMetadata>, + warp::Rejection, + > { + let (root, execution_optimistic, finalized) = self.root(chain)?; + let block = BlockId::blinded_block_by_root(&root, chain)?.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("beacon block with root {}", root)) + })?; + + // Error if the block is pre-Deneb and lacks blobs. 
+ let blob_kzg_commitments = block.message().body().blob_kzg_commitments().map_err(|_| { + warp_utils::reject::custom_bad_request( + "block is pre-Deneb and has no blobs".to_string(), + ) + })?; + + let blob_indices_opt = query.versioned_hashes.map(|versioned_hashes| { + versioned_hashes + .iter() + .flat_map(|versioned_hash| { + blob_kzg_commitments.iter().position(|commitment| { + let computed_hash = commitment.calculate_versioned_hash(); + computed_hash == *versioned_hash + }) + }) + .map(|index| index as u64) + .collect::>() + }); + + let max_blobs_per_block = chain.spec.max_blobs_per_block(block.epoch()) as usize; + let blob_sidecar_list = if !blob_kzg_commitments.is_empty() { + if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + Self::get_blobs_from_data_columns(chain, root, blob_indices_opt, &block)? + } else { + Self::get_blobs(chain, root, blob_indices_opt, max_blobs_per_block)? + } + } else { + BlobSidecarList::new(vec![], max_blobs_per_block) + .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))? 
+ }; + + let blobs = blob_sidecar_list + .into_iter() + .map(|sidecar| BlobWrapper:: { + blob: sidecar.blob.clone(), + }) + .collect(); + + Ok(UnversionedResponse { + metadata: ExecutionOptimisticFinalizedMetadata { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, + data: blobs, + }) + } + fn get_blobs( chain: &BeaconChain, root: Hash256, @@ -369,9 +432,9 @@ impl BlockId { let blob_sidecar_list_filtered = match indices { Some(vec) => { - let list: Vec<_> = blob_sidecar_list + let list: Vec<_> = vec .into_iter() - .filter(|blob_sidecar| vec.contains(&blob_sidecar.index)) + .flat_map(|index| blob_sidecar_list.get(index as usize).cloned()) .collect(); BlobSidecarList::new(list, max_blobs_per_block) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5c6a9df739..7f6c97a0f8 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -214,6 +214,7 @@ pub fn prometheus_metrics() -> warp::filters::log::Log warp::filters::log::Log( */ // GET beacon/blob_sidecars/{block_id} - let get_blobs = eth_v1 + let get_blob_sidecars = eth_v1 .and(warp::path("beacon")) .and(warp::path("blob_sidecars")) .and(block_id_or_err) @@ -1950,6 +1948,52 @@ pub fn serve( }, ); + // GET beacon/blobs/{block_id} + let get_blobs = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blobs")) + .and(block_id_or_err) + .and(warp::path::end()) + .and(multi_key_query::()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) + .then( + |block_id: BlockId, + version_hashes_res: Result, + task_spawner: TaskSpawner, + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let versioned_hashes = version_hashes_res?; + let response = + block_id.get_blobs_by_versioned_hashes(versioned_hashes, &chain)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + 
.body(response.data.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::No, + response.metadata.execution_optimistic.unwrap_or(false), + response.metadata.finalized.unwrap_or(false), + response.data, + )?; + Ok(warp::reply::json(&res).into_response()) + } + } + }) + }, + ); + /* * beacon/pool */ @@ -4797,6 +4841,7 @@ pub fn serve( .uor(get_beacon_block_attestations) .uor(get_beacon_blinded_block) .uor(get_beacon_block_root) + .uor(get_blob_sidecars) .uor(get_blobs) .uor(get_beacon_pool_attestations) .uor(get_beacon_pool_attester_slashings) diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 3705c399bd..ceac60cbad 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -3,12 +3,13 @@ use crate::state_id::StateId; use beacon_chain::{ BeaconChain, BeaconChainError, BeaconChainTypes, - beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch}, + beacon_proposer_cache::{ + compute_proposer_duties_from_head, ensure_state_can_determine_proposers_for_epoch, + }, }; use eth2::types::{self as api_types}; use safe_arith::SafeArith; use slot_clock::SlotClock; -use std::cmp::Ordering; use tracing::debug; use types::{Epoch, EthSpec, Hash256, Slot}; @@ -105,36 +106,29 @@ fn try_proposer_duties_from_cache( let head_decision_root = head .snapshot .beacon_state - .proposer_shuffling_decision_root(head_block_root) + .proposer_shuffling_decision_root(head_block_root, &chain.spec) .map_err(warp_utils::reject::beacon_state_error)?; let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::unhandled_error)?; - let dependent_root = match 
head_epoch.cmp(&request_epoch) { - // head_epoch == request_epoch - Ordering::Equal => head_decision_root, - // head_epoch < request_epoch - Ordering::Less => head_block_root, - // head_epoch > request_epoch - Ordering::Greater => { - return Err(warp_utils::reject::custom_server_error(format!( - "head epoch {} is later than request epoch {}", - head_epoch, request_epoch - ))); - } - }; + // This code path can't handle requests for past epochs. + if head_epoch > request_epoch { + return Err(warp_utils::reject::custom_server_error(format!( + "head epoch {head_epoch} is later than request epoch {request_epoch}", + ))); + } chain .beacon_proposer_cache .lock() - .get_epoch::(dependent_root, request_epoch) + .get_epoch::(head_decision_root, request_epoch) .cloned() .map(|indices| { convert_to_api_response( chain, request_epoch, - dependent_root, + head_decision_root, execution_optimistic, indices.to_vec(), ) @@ -204,18 +198,19 @@ fn compute_historic_proposer_duties( } }; - let (state, execution_optimistic) = - if let Some((state_root, mut state, execution_optimistic)) = state_opt { - // If we've loaded the head state it might be from a previous epoch, ensure it's in a - // suitable epoch. - ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec) - .map_err(warp_utils::reject::unhandled_error)?; - (state, execution_optimistic) - } else { - let (state, execution_optimistic, _finalized) = - StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?; - (state, execution_optimistic) - }; + let (state, execution_optimistic) = if let Some((state_root, mut state, execution_optimistic)) = + state_opt + { + // If we've loaded the head state it might be from a previous epoch, ensure it's in a + // suitable epoch. 
+ ensure_state_can_determine_proposers_for_epoch(&mut state, state_root, epoch, &chain.spec) + .map_err(warp_utils::reject::unhandled_error)?; + (state, execution_optimistic) + } else { + let (state, execution_optimistic, _finalized) = + StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?; + (state, execution_optimistic) + }; // Ensure the state lookup was correct. if state.current_epoch() != epoch { @@ -234,7 +229,7 @@ fn compute_historic_proposer_duties( // We can supply the genesis block root as the block root since we know that the only block that // decides its own root is the genesis block. let dependent_root = state - .proposer_shuffling_decision_root(chain.genesis_block_root) + .proposer_shuffling_decision_root(chain.genesis_block_root, &chain.spec) .map_err(BeaconChainError::from) .map_err(warp_utils::reject::unhandled_error)?; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index b6411167d9..05a4a4b7a4 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -412,7 +412,7 @@ fn build_data_columns( error!( error = ?e, %slot, - "Invalid data column - not publishing block" + "Invalid data column - not publishing data columns" ); warp_utils::reject::custom_bad_request(format!("{e:?}")) })?; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 2072fb9932..9c18a7c1e8 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -90,6 +90,7 @@ struct ApiTester { struct ApiTesterConfig { spec: ChainSpec, retain_historic_states: bool, + import_all_data_columns: bool, } impl Default for ApiTesterConfig { @@ -99,6 +100,7 @@ impl Default for ApiTesterConfig { Self { spec, retain_historic_states: false, + import_all_data_columns: false, } } } @@ -137,6 +139,7 @@ impl ApiTester { .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() 
.mock_execution_layer() + .import_all_data_columns(config.import_all_data_columns) .build(); harness @@ -441,10 +444,7 @@ impl ApiTester { } pub async fn new_mev_tester_default_payload_value() -> Self { - let mut config = ApiTesterConfig { - retain_historic_states: false, - spec: E::default_spec(), - }; + let mut config = ApiTesterConfig::default(); config.spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); let tester = Self::new_from_config(config) @@ -1858,7 +1858,7 @@ impl ApiTester { }; let result = match self .client - .get_blobs::( + .get_blob_sidecars::( CoreBlockId::Root(block_root), blob_indices.as_deref(), &self.chain.spec, @@ -1879,6 +1879,77 @@ impl ApiTester { self } + pub async fn test_get_blobs(self, versioned_hashes: bool) -> Self { + let block_id = BlockId(CoreBlockId::Finalized); + let (block_root, _, _) = block_id.root(&self.chain).unwrap(); + let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); + let num_blobs = block.num_expected_blobs(); + + let versioned_hashes: Option> = if versioned_hashes { + Some( + block + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .iter() + .map(|commitment| commitment.calculate_versioned_hash()) + .collect(), + ) + } else { + None + }; + + let result = match self + .client + .get_blobs::(CoreBlockId::Root(block_root), versioned_hashes.as_deref()) + .await + { + Ok(response) => response.unwrap().into_data(), + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + assert_eq!( + result.len(), + versioned_hashes.map_or(num_blobs, |versioned_hashes| versioned_hashes.len()) + ); + + self + } + + pub async fn test_get_blobs_post_fulu_full_node(self, versioned_hashes: bool) -> Self { + let block_id = BlockId(CoreBlockId::Finalized); + let (block_root, _, _) = block_id.root(&self.chain).unwrap(); + let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); + + let versioned_hashes: Option> = if versioned_hashes { + Some( + 
block + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .iter() + .map(|commitment| commitment.calculate_versioned_hash()) + .collect(), + ) + } else { + None + }; + + match self + .client + .get_blobs::(CoreBlockId::Root(block_root), versioned_hashes.as_deref()) + .await + { + Ok(result) => panic!("Full node are unable to return blobs post-Fulu: {result:?}"), + // Post-Fulu, full nodes don't store blobs and return error 500 + Err(e) => assert_eq!(e.status().unwrap(), 500), + }; + + self + } + /// Test fetching of blob sidecars that are not available in the database due to pruning. /// /// If `zero_blobs` is false, test a block with >0 blobs, which should be unavailable. @@ -1918,7 +1989,7 @@ impl ApiTester { match self .client - .get_blobs::(CoreBlockId::Slot(test_slot), None, &self.chain.spec) + .get_blob_sidecars::(CoreBlockId::Slot(test_slot), None, &self.chain.spec) .await { Ok(result) => { @@ -1956,7 +2027,7 @@ impl ApiTester { match self .client - .get_blobs::(CoreBlockId::Slot(test_slot), None, &self.chain.spec) + .get_blob_sidecars::(CoreBlockId::Slot(test_slot), None, &self.chain.spec) .await { Ok(result) => panic!("queries for pre-Deneb slots should fail. 
got: {result:?}"), @@ -7704,10 +7775,7 @@ async fn builder_payload_chosen_by_profit_v3() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_works_post_capella() { - let mut config = ApiTesterConfig { - retain_historic_states: false, - spec: E::default_spec(), - }; + let mut config = ApiTesterConfig::default(); config.spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); config.spec.capella_fork_epoch = Some(Epoch::new(0)); @@ -7724,10 +7792,7 @@ async fn builder_works_post_capella() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_works_post_deneb() { - let mut config = ApiTesterConfig { - retain_historic_states: false, - spec: E::default_spec(), - }; + let mut config = ApiTesterConfig::default(); config.spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); config.spec.capella_fork_epoch = Some(Epoch::new(0)); @@ -7745,10 +7810,7 @@ async fn builder_works_post_deneb() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blob_sidecars() { - let mut config = ApiTesterConfig { - retain_historic_states: false, - spec: E::default_spec(), - }; + let mut config = ApiTesterConfig::default(); config.spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); config.spec.capella_fork_epoch = Some(Epoch::new(0)); @@ -7761,6 +7823,53 @@ async fn get_blob_sidecars() { .test_get_blob_sidecars(false) .await .test_get_blob_sidecars(true) + .await + .test_get_blobs(false) + .await + .test_get_blobs(true) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blobs_post_fulu_supernode() { + let mut config = ApiTesterConfig { + retain_historic_states: false, + spec: E::default_spec(), + // For supernode, we import all data columns + import_all_data_columns: true, + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + 
config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + // We can call the same get_blobs function in this test + // because the function will call get_blobs_by_versioned_hashes which handles peerDAS post-Fulu + .test_get_blobs(false) + .await + .test_get_blobs(true) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blobs_post_fulu_full_node() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + .test_get_blobs_post_fulu_full_node(false) + .await + .test_get_blobs_post_fulu_full_node(true) .await; } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 89c6c58d4f..3b85bc5e27 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -36,7 +36,7 @@ pub struct Config { pub network_dir: PathBuf, /// IP addresses to listen on. - pub(crate) listen_addresses: ListenAddress, + pub listen_addresses: ListenAddress, /// The address to broadcast to peers about which address we are listening on. None indicates /// that no discovery address has been set in the CLI args. @@ -143,6 +143,9 @@ pub struct Config { /// Flag for advertising a fake CGC to peers for testing ONLY. pub advertise_false_custody_group_count: Option, + /// Extra configurations for Mallory. 
+ #[serde(skip)] + pub attacker_config: crate::MalloryConfig, } impl Config { @@ -368,6 +371,7 @@ impl Default for Config { inbound_rate_limiter_config: None, idontwant_message_size_threshold: DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD, advertise_false_custody_group_count: None, + attacker_config: Default::default(), } } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 49de62546d..a91b2490cc 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -241,14 +241,8 @@ impl Discovery { quic = bootnode_enr.quic4(), "Adding node to routing table" ); - let repr = bootnode_enr.to_string(); - let _ = discv5.add_enr(bootnode_enr).map_err(|e| { - error!( - addr = repr, - error = e.to_string(), - "Could not add peer to the local routing table" - ) - }); + // Error is suppressed for mallory + let _ = discv5.add_enr(bootnode_enr); } // Start the discv5 service and obtain an event stream diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index b6be9b5222..7f4f0bec19 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -2,10 +2,11 @@ /// all required libp2p functionality. /// /// This crate builds and manages the libp2p services required by the beacon node. 
-mod config; +pub mod config; pub mod service; pub mod discovery; +mod mallory_config; pub mod metrics; pub mod peer_manager; pub mod rpc; @@ -36,6 +37,12 @@ impl FromStr for PeerIdSerialized { } } +impl From for PeerIdSerialized { + fn from(peer_id: PeerId) -> Self { + PeerIdSerialized(peer_id) + } +} + impl Serialize for PeerIdSerialized { fn serialize(&self, serializer: S) -> Result where @@ -110,13 +117,16 @@ pub use discv5; pub use gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p; pub use libp2p::{Multiaddr, multiaddr}; -pub use libp2p::{PeerId, Swarm, core::ConnectedPoint}; +pub use libp2p::{PeerId, Swarm, core::ConnectedPoint, identity::Keypair}; +pub use mallory_config::MalloryConfig; +pub use network_utils::discovery_metrics::scrape_discovery_metrics; pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, peerdb::PeerDB, peerdb::client::Client, peerdb::score::{PeerAction, ReportSource}, }; +pub use service::Behaviour; // pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; pub use service::api_types::Response; pub use service::utils::*; diff --git a/beacon_node/lighthouse_network/src/mallory_config.rs b/beacon_node/lighthouse_network/src/mallory_config.rs new file mode 100644 index 0000000000..450a40b72d --- /dev/null +++ b/beacon_node/lighthouse_network/src/mallory_config.rs @@ -0,0 +1,37 @@ +/// Every configuration needed for Mallory. +#[derive(Debug, Clone)] +pub struct MalloryConfig { + /* Peer manager stuff */ + /// Ping inbound peers this often (in seconds) instead of the default `PING_INTERVAL_INBOUND`. + pub inbound_peers_ping: Option, + /// Ping outbound peers this often (in seconds) instead of the default `PING_INTERVAL_OUTBOUND`. + pub outbound_peers_ping: Option, + /// Status peers this often (in seconds) instead of the default `STATUS_INTERVAL`. 
+ pub status_interval: Option, + + /* RPC stuff */ + /// Duration in seconds after which an inbound connection with a peer times out instead of the + /// default `RESPONSE_TIMEOUT`. + pub inbound_rpc_timeout: Option, + + /// Duration in seconds after which an outbound connection with a peer times out instead of the + /// default `RESPONSE_TIMEOUT`. + pub outbound_rpc_timeout: Option, + + /* Behaviour Stuff */ + // Allow the user to handle a ping request + pub user_handle_ping: bool, +} + +impl Default for MalloryConfig { + fn default() -> Self { + Self { + inbound_peers_ping: None, + outbound_peers_ping: None, + status_interval: None, + inbound_rpc_timeout: None, + outbound_rpc_timeout: None, + user_handle_ping: false, + } + } +} diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 592fccdc74..ad16bb0421 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -23,6 +23,7 @@ pub use libp2p::identity::Keypair; pub mod peerdb; use crate::peer_manager::peerdb::client::ClientKind; +use crate::types::GossipKind; use libp2p::multiaddr; use network_utils::discovery_metrics; use network_utils::enr_ext::{EnrExt, peer_id_to_node_id}; @@ -1434,8 +1435,16 @@ impl PeerManager { // Update peer score metrics; self.update_peer_score_metrics(); - // Maintain minimum count for custody peers. - self.maintain_custody_peers(); + // Maintain minimum count for custody peers if we are subscribed to any data column topics (i.e. PeerDAS activated) + let peerdas_enabled = self + .network_globals + .gossipsub_subscriptions + .read() + .iter() + .any(|topic| matches!(topic.kind(), &GossipKind::DataColumnSidecar(_))); + if peerdas_enabled { + self.maintain_custody_peers(); + } // Maintain minimum count for sync committee peers. 
self.maintain_sync_committee_peers(); @@ -3140,4 +3149,60 @@ mod tests { }) } } + + #[tokio::test] + async fn test_custody_peer_logic_only_runs_when_peerdas_enabled() { + use crate::types::{GossipEncoding, GossipTopic}; + + let mut peer_manager = build_peer_manager(5).await; + + // Set up sampling subnets so maintain_custody_peers would have work to do + *peer_manager.network_globals.sampling_subnets.write() = std::collections::HashSet::from([ + DataColumnSubnetId::new(0), + DataColumnSubnetId::new(1), + ]); + + // Test 1: No data column subscriptions - custody peer logic should NOT run + peer_manager.heartbeat(); + + // Should be no new DiscoverSubnetPeers events since PeerDAS is not enabled + let discovery_events: Vec<_> = peer_manager + .events + .iter() + .filter(|event| matches!(event, PeerManagerEvent::DiscoverSubnetPeers(_))) + .collect(); + assert!( + discovery_events.is_empty(), + "Should not generate discovery events when PeerDAS is disabled, but found: {:?}", + discovery_events + ); + + // Test 2: Add data column subscription - custody peer logic should run + let data_column_topic = GossipTopic::new( + GossipKind::DataColumnSidecar(DataColumnSubnetId::new(0)), + GossipEncoding::SSZSnappy, + [0, 0, 0, 0], // fork_digest + ); + peer_manager + .network_globals + .gossipsub_subscriptions + .write() + .insert(data_column_topic); + + // Clear any existing events to isolate the test + peer_manager.events.clear(); + + peer_manager.heartbeat(); + + // Should now have DiscoverSubnetPeers events since PeerDAS is enabled + let discovery_events: Vec<_> = peer_manager + .events + .iter() + .filter(|event| matches!(event, PeerManagerEvent::DiscoverSubnetPeers(_))) + .collect(); + assert!( + !discovery_events.is_empty(), + "Should generate discovery events when PeerDAS is enabled, but found no discovery events" + ); + } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index 
5e761f90a9..146f2c7fee 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -3,11 +3,11 @@ //! Currently using identify to fingerprint. use libp2p::identify::Info as IdentifyInfo; -use serde::Serialize; +use serde::{Deserialize, Serialize}; use strum::{AsRefStr, EnumIter, IntoStaticStr}; /// Various client and protocol information related to a node. -#[derive(Clone, Debug, Serialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Client { /// The client's name (Ex: lighthouse, prism, nimbus, etc) pub kind: ClientKind, @@ -21,7 +21,9 @@ pub struct Client { pub agent_string: Option, } -#[derive(Clone, Copy, Debug, Serialize, PartialEq, AsRefStr, IntoStaticStr, EnumIter)] +#[derive( + Clone, Copy, Debug, Serialize, PartialEq, AsRefStr, IntoStaticStr, EnumIter, Deserialize, +)] pub enum ClientKind { /// A lighthouse node (the best kind). Lighthouse, diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index acb0188456..c8f0f74376 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -100,7 +100,7 @@ impl SSZSnappyInboundCodec { } } }, - RpcResponse::Error(_, err) => err.as_ssz_bytes(), + RpcResponse::Error(_, err) => err.as_bytes().to_vec().as_ssz_bytes(), RpcResponse::StreamTermination(_) => { unreachable!("Code error - attempting to encode a stream termination") } @@ -336,6 +336,14 @@ impl Encoder> for SSZSnappyOutboundCodec { type Error = RPCError; fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { + let compress = !matches!( + &item, + RequestType::Raw(RawRequest { + mode: RawMode::Raw, + .. 
+ }) + ); + let bytes = match item { RequestType::Status(req) => { // Send the status message based on the negotiated protocol @@ -367,14 +375,25 @@ impl Encoder> for SSZSnappyOutboundCodec { RequestType::MetaData(_) | RequestType::LightClientOptimisticUpdate | RequestType::LightClientFinalityUpdate => return Ok(()), + RequestType::Raw(RawRequest { + bytes, + protocol: _, + mode, + }) => match mode { + RawMode::EncodeAndCompress => bytes.as_ssz_bytes(), + RawMode::Compress | RawMode::Raw => bytes, + }, }; + // Mallory doesn't care about inbound limits + /* // SSZ encoded bytes should be within `max_packet_size` if bytes.len() > self.max_packet_size { return Err(RPCError::InternalError( "attempting to encode data > max_packet_size", )); } + */ // Inserts the length prefix of the uncompressed bytes into dst // encoded as a unsigned varint @@ -382,12 +401,14 @@ impl Encoder> for SSZSnappyOutboundCodec { .encode(bytes.len(), dst) .map_err(RPCError::from)?; - let mut writer = FrameEncoder::new(Vec::new()); - writer.write_all(&bytes).map_err(RPCError::from)?; - writer.flush().map_err(RPCError::from)?; - - // Write compressed bytes to `dst` - dst.extend_from_slice(writer.get_ref()); + if compress { + let mut writer = FrameEncoder::new(Vec::new()); + writer.write_all(&bytes).map_err(RPCError::from)?; + writer.flush().map_err(RPCError::from)?; + dst.extend_from_slice(writer.get_ref()); + } else { + dst.extend_from_slice(&bytes); + } Ok(()) } } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 720895bbe7..99369f30ac 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -1,6 +1,7 @@ #![allow(clippy::type_complexity)] #![allow(clippy::cognitive_complexity)] +use super::MalloryLocalConfig; use super::methods::{GoodbyeReason, RpcErrorResponse, RpcResponse}; use super::outbound::OutboundRequestContainer; use super::protocol::{InboundOutput, 
Protocol, RPCError, RPCProtocol, RequestType}; @@ -40,7 +41,7 @@ const SHUTDOWN_TIMEOUT_SECS: u64 = 15; const MAX_INBOUND_SUBSTREAMS: usize = 32; /// Timeout that will be used for inbound and outbound responses. -const RESP_TIMEOUT: Duration = Duration::from_secs(10); +const _RESP_TIMEOUT: Duration = Duration::from_secs(10); /// Identifier of inbound and outbound substreams from the handler's perspective. #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] @@ -143,6 +144,9 @@ where /// Waker, to be sure the handler gets polled when needed. waker: Option, + + /// Additional configurations for the RPC Handler + config: MalloryLocalConfig, } enum HandlerState { @@ -226,6 +230,7 @@ where fork_context: Arc, peer_id: PeerId, connection_id: ConnectionId, + config: MalloryLocalConfig, ) -> Self { RPCHandler { connection_id, @@ -245,6 +250,7 @@ where outbound_io_error_retries: 0, fork_context, waker: None, + config, } } @@ -540,7 +546,8 @@ where // If this substream has not ended, we reset the timer. // Each chunk is allowed RESPONSE_TIMEOUT to be sent. if let Some(ref delay_key) = info.delay_key { - self.inbound_substreams_delay.reset(delay_key, RESP_TIMEOUT); + self.inbound_substreams_delay + .reset(delay_key, self.config.inbound_timeout); } // The stream may be currently idle. Attempt to process more @@ -709,7 +716,7 @@ where }; substream_entry.max_remaining_chunks = Some(max_remaining_chunks); self.outbound_substreams_delay - .reset(delay_key, RESP_TIMEOUT); + .reset(delay_key, self.config.outbound_timeout); } } @@ -955,9 +962,10 @@ where if max_responses > 0 { if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { // Store the stream and tag the output. 
- let delay_key = self - .inbound_substreams_delay - .insert(self.current_inbound_substream_id, RESP_TIMEOUT); + let delay_key = self.inbound_substreams_delay.insert( + self.current_inbound_substream_id, + self.config.inbound_timeout, + ); let awaiting_stream = InboundState::Idle(substream); self.inbound_substreams.insert( self.current_inbound_substream_id, @@ -1031,9 +1039,10 @@ where Some(max_responses) }; // new outbound request. Store the stream and tag the output. - let delay_key = self - .outbound_substreams_delay - .insert(self.current_outbound_substream_id, RESP_TIMEOUT); + let delay_key = self.outbound_substreams_delay.insert( + self.current_outbound_substream_id, + self.config.outbound_timeout, + ); let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { substream: Box::new(substream), request, diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9319973e59..09da6d6c0c 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -1,5 +1,6 @@ //! Available RPC methods types and ids. +use super::protocol::SupportedProtocol; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use regex::bytes::Regex; use serde::Serialize; @@ -11,6 +12,7 @@ use std::marker::PhantomData; use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; +use strum::{Display as StrumDisplay, EnumString}; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; @@ -129,6 +131,38 @@ pub struct Ping { pub data: u64, } +#[derive(Debug, Clone, PartialEq)] +pub struct RawRequest { + pub bytes: Vec, + pub protocol: SupportedProtocol, + pub mode: RawMode, +} + +#[derive(Debug, Clone, PartialEq, EnumString, StrumDisplay)] +pub enum RawMode { + /// SSZ encode, Snappy compress. 
+ #[strum(serialize = "encode-compress")] + EncodeAndCompress, + /// Only Snappy compress. + #[strum(serialize = "compress")] + Compress, + /// Do not alter the bytes. + #[strum(serialize = "raw")] + Raw, +} + +impl Default for RawMode { + fn default() -> Self { + RawMode::EncodeAndCompress + } +} + +impl std::fmt::Display for RawRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} + /// The METADATA request structure. #[superstruct( variants(V1, V2, V3), @@ -411,6 +445,8 @@ impl DataColumnsByRangeRequest { } } +const MALLORY_MAX_REQUEST_BLOCKS: usize = 10000000000000000000; + /// Request a number of beacon block roots from a peer. #[superstruct( variants(V1, V2), @@ -498,6 +534,12 @@ impl BlocksByRootRequest { .map_err(|e| format!("BlocksByRootRequestV1 too many roots: {e:?}"))?; Ok(Self::V1(BlocksByRootRequestV1 { block_roots })) } + + pub fn mallory_new(block_roots: Vec) -> Result { + let block_roots = RuntimeVariableList::new(block_roots, MALLORY_MAX_REQUEST_BLOCKS) + .map_err(|e| format!("Mallory BlocksByRootRequest too many roots: {e:?}"))?; + Ok(Self::V2(BlocksByRootRequestV2 { block_roots })) + } } /// Request a number of beacon blocks and blobs from a peer. @@ -657,10 +699,10 @@ impl ResponseTermination { /// and the contents of the response #[derive(Debug, Clone)] pub enum RpcResponse { - /// The response is a successful. + /// The response is successful. Success(RpcSuccessResponse), - Error(RpcErrorResponse, ErrorType), + Error(RpcErrorResponse, String), /// Received a stream termination indicating which response is being terminated. StreamTermination(ResponseTermination), @@ -710,7 +752,7 @@ impl RpcResponse { 140 => RpcErrorResponse::BlobsNotFoundForBlock, _ => RpcErrorResponse::Unknown, }; - RpcResponse::Error(code, err) + RpcResponse::Error(code, err.to_string()) } /// Returns true if this response always terminates the stream. 
diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 7c43018af8..784ed742c0 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -33,7 +33,11 @@ pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, ResponseTermination, RpcErrorResponse, StatusMessage, }; -pub use protocol::{Protocol, RPCError}; + +use crate::MalloryConfig; + +pub use methods::{RawMode, RawRequest}; +pub use protocol::{Protocol, RPCError, SupportedProtocol}; pub(crate) mod codec; pub mod config; @@ -142,6 +146,16 @@ pub struct RPCMessage { type BehaviourAction = ToSwarm, RPCSend>; +/// Additional configurations for the RPC Behaviour. +#[derive(Clone, Copy)] +pub struct MalloryLocalConfig { + /// Timeout in seconds for inbound connections. + pub inbound_timeout: std::time::Duration, + /// Timeout for outbound connections. + pub outbound_timeout: std::time::Duration, + pub self_handle_ping: bool, +} + /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. pub struct RPC { @@ -157,6 +171,9 @@ pub struct RPC { enable_light_client_server: bool, /// A sequential counter indicating when data gets modified. 
seq_number: u64, + + /// Mallory Config + config: MalloryLocalConfig, } impl RPC { @@ -166,6 +183,7 @@ impl RPC { inbound_rate_limiter_config: Option, outbound_rate_limiter_config: Option, seq_number: u64, + mallory_config: &MalloryConfig, ) -> Self { let response_limiter = inbound_rate_limiter_config.map(|config| { debug!(?config, "Using response rate limiting params"); @@ -177,6 +195,16 @@ impl RPC { SelfRateLimiter::new(outbound_rate_limiter_config, fork_context.clone()) .expect("Outbound limiter configuration parameters are valid"); + let mallory_config = MalloryLocalConfig { + inbound_timeout: std::time::Duration::from_secs( + mallory_config.inbound_rpc_timeout.unwrap_or(10), + ), + outbound_timeout: std::time::Duration::from_secs( + mallory_config.outbound_rpc_timeout.unwrap_or(10), + ), + self_handle_ping: mallory_config.user_handle_ping, + }; + RPC { response_limiter, outbound_request_limiter, @@ -185,6 +213,7 @@ impl RPC { fork_context, enable_light_client_server, seq_number, + config: mallory_config, } } @@ -297,6 +326,14 @@ impl RPC { trace!(%peer_id, "Sending Ping"); self.send_request(peer_id, id, RequestType::Ping(ping)); } + + /// Sends a pong response + pub fn pong(&mut self, inbound_request_id: InboundRequestId, data: u64) { + let _ = self.send_response( + inbound_request_id, + RpcResponse::Success(RpcSuccessResponse::Pong(Ping { data })), + ); + } } impl NetworkBehaviour for RPC @@ -324,7 +361,13 @@ where (), ); - let handler = RPCHandler::new(protocol, self.fork_context.clone(), peer_id, connection_id); + let handler = RPCHandler::new( + protocol, + self.fork_context.clone(), + peer_id, + connection_id, + self.config.clone(), + ); Ok(handler) } @@ -347,7 +390,13 @@ where (), ); - let handler = RPCHandler::new(protocol, self.fork_context.clone(), peer_id, connection_id); + let handler = RPCHandler::new( + protocol, + self.fork_context.clone(), + peer_id, + connection_id, + self.config, + ); Ok(handler) } @@ -474,14 +523,15 @@ where // If we 
received a Ping, we queue a Pong response. if let RequestType::Ping(_) = request_type { - trace!(connection_id = %connection_id, %peer_id, "Received Ping, queueing Pong"); - self.send_response( - request_id, - RpcResponse::Success(RpcSuccessResponse::Pong(Ping { - data: self.seq_number, - })), - ) - .expect("Request to exist"); + if !self.config.self_handle_ping { + trace!(connection_id = %connection_id, %peer_id, "Received Ping, queueing Pong"); + let _ = self.send_response( + request_id, + RpcResponse::Success(RpcSuccessResponse::Pong(Ping { + data: self.seq_number, + })), + ); + } } self.events.push(ToSwarm::GenerateEvent(RPCMessage { diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 228a74f08c..be72515088 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -728,6 +728,7 @@ pub enum RequestType { LightClientUpdatesByRange(LightClientUpdatesByRangeRequest), Ping(Ping), MetaData(MetadataRequest), + Raw(RawRequest), } /// Implements the encoding per supported protocol for `RPCRequest`. 
@@ -751,6 +752,7 @@ impl RequestType { RequestType::LightClientOptimisticUpdate => 1, RequestType::LightClientFinalityUpdate => 1, RequestType::LightClientUpdatesByRange(req) => req.count, + RequestType::Raw(_) => 1, } } @@ -790,6 +792,7 @@ impl RequestType { RequestType::LightClientUpdatesByRange(_) => { SupportedProtocol::LightClientUpdatesByRangeV1 } + RequestType::Raw(r) => r.protocol, } } @@ -813,6 +816,7 @@ impl RequestType { RequestType::LightClientFinalityUpdate => unreachable!(), RequestType::LightClientOptimisticUpdate => unreachable!(), RequestType::LightClientUpdatesByRange(_) => unreachable!(), + RequestType::Raw(_) => unreachable!(), } } @@ -876,6 +880,7 @@ impl RequestType { SupportedProtocol::LightClientUpdatesByRangeV1, Encoding::SSZSnappy, )], + RequestType::Raw(req) => vec![ProtocolId::new(req.protocol, Encoding::SSZSnappy)], } } @@ -895,6 +900,7 @@ impl RequestType { RequestType::LightClientOptimisticUpdate => true, RequestType::LightClientFinalityUpdate => true, RequestType::LightClientUpdatesByRange(_) => true, + RequestType::Raw(_) => true, } } } @@ -1016,6 +1022,7 @@ impl std::fmt::Display for RequestType { RequestType::LightClientUpdatesByRange(_) => { write!(f, "Light client updates by range request") } + RequestType::Raw(raw) => write!(f, "Raw: {}", raw), } } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 65cd1c2e61..8b364f506c 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -382,16 +382,41 @@ impl RPCRateLimiter { pub fn prune(&mut self) { let time_since_start = self.init_time.elapsed(); - self.ping_rl.prune(time_since_start); - self.status_rl.prune(time_since_start); - self.metadata_rl.prune(time_since_start); - self.goodbye_rl.prune(time_since_start); - self.bbrange_rl.prune(time_since_start); - self.bbroots_rl.prune(time_since_start); - 
self.blbrange_rl.prune(time_since_start); - self.blbroot_rl.prune(time_since_start); - self.dcbrange_rl.prune(time_since_start); - self.dcbroot_rl.prune(time_since_start); + + let Self { + prune_interval: _, + init_time: _, + goodbye_rl, + ping_rl, + metadata_rl, + status_rl, + bbrange_rl, + bbroots_rl, + blbrange_rl, + blbroot_rl, + dcbroot_rl, + dcbrange_rl, + lc_bootstrap_rl, + lc_optimistic_update_rl, + lc_finality_update_rl, + lc_updates_by_range_rl, + fork_context: _, + } = self; + + goodbye_rl.prune(time_since_start); + ping_rl.prune(time_since_start); + metadata_rl.prune(time_since_start); + status_rl.prune(time_since_start); + bbrange_rl.prune(time_since_start); + bbroots_rl.prune(time_since_start); + blbrange_rl.prune(time_since_start); + blbroot_rl.prune(time_since_start); + dcbrange_rl.prune(time_since_start); + dcbroot_rl.prune(time_since_start); + lc_bootstrap_rl.prune(time_since_start); + lc_optimistic_update_rl.prune(time_since_start); + lc_finality_update_rl.prune(time_since_start); + lc_updates_by_range_rl.prune(time_since_start); } } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 0f5fd99c27..536949fad9 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,7 +1,10 @@ -use crate::rpc::methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}; +use crate::rpc::methods::{ + Ping, ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage, +}; use libp2p::PeerId; use std::fmt::{Display, Formatter}; use std::sync::Arc; +use strum::IntoStaticStr; use types::{ BlobSidecar, DataColumnSidecar, Epoch, EthSpec, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, @@ -124,8 +127,9 @@ pub enum AppRequestId { // sent. 
The main difference is the absense of Pong and Metadata, which don't leave the // Behaviour. For all protocol reponses managed by RPC see `RPCResponse` and // `RPCCodedResponse`. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, IntoStaticStr)] pub enum Response { + Ping(u64), /// A Status message. Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. @@ -178,6 +182,7 @@ impl std::convert::From> for RpcResponse { None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRange), }, Response::Status(s) => RpcResponse::Success(RpcSuccessResponse::Status(s)), + Response::Ping(data) => RpcResponse::Success(RpcSuccessResponse::Pong(Ping { data })), Response::LightClientBootstrap(b) => { RpcResponse::Success(RpcSuccessResponse::LightClientBootstrap(b)) } diff --git a/beacon_node/lighthouse_network/src/service/mallory.rs b/beacon_node/lighthouse_network/src/service/mallory.rs new file mode 100644 index 0000000000..f227a06fec --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/mallory.rs @@ -0,0 +1,191 @@ +use super::*; +use libp2p::core::ConnectedPoint; +use libp2p::core::transport::{ListenerId, TransportError}; +use libp2p::swarm::*; +use std::io; +/// Custom error that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`]. +#[derive(Debug)] +pub enum MallorySwarmEvent { + /// One of the listeners gracefully closed. + ListenerClosed { + /// The listener that closed. + listener_id: libp2p::core::transport::ListenerId, + /// The addresses that the listener was listening on. These addresses are now considered + /// expired, similar to if a [`ExpiredListenAddr`](SwarmEvent::ExpiredListenAddr) event + /// has been generated for each of them. + addresses: Vec, + /// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err` + /// if the stream produced an error. 
+ reason: Result<(), std::io::Error>, + }, + /// One of the listeners reported a non-fatal error. + ListenerError { + /// The listener that errored. + listener_id: ListenerId, + /// The listener error. + error: io::Error, + }, + /// Outgoing connection attempt failed. + OutgoingConnectionError { + /// Identifier of the connection. + connection_id: ConnectionId, + /// If known, [`PeerId`] of the peer we tried to reach. + peer_id: Option, + /// Error that has been encountered. + error: DialError, + }, + IncomingConnection { + /// Identifier of the connection. + connection_id: ConnectionId, + /// Local connection address. + /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) + /// event. + local_addr: Multiaddr, + /// Address used to send back data to the remote. + send_back_addr: Multiaddr, + }, + /// An error happened on a connection during its initial handshake. + /// + /// This can include, for example, an error during the handshake of the encryption layer, or + /// the connection unexpectedly closed. + IncomingConnectionError { + /// PeerId + peer_id: Option, + /// Identifier of the connection. + connection_id: ConnectionId, + /// Local connection address. + /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) + /// event. + local_addr: Multiaddr, + /// Address used to send back data to the remote. + send_back_addr: Multiaddr, + /// The error that happened. + error: ListenError, + }, + Dialing { + /// Identity of the peer that we are connecting to. + peer_id: Option, + /// Identifier of the connection. + connection_id: ConnectionId, + }, + ConnectionClosed { + /// Identity of the peer that we have connected to. + peer_id: PeerId, + /// Identifier of the connection. + connection_id: ConnectionId, + /// Endpoint of the connection that has been closed. + endpoint: ConnectedPoint, + /// Number of other remaining connections to this same peer. 
+ num_established: u32, + /// Reason for the disconnection, if it was not a successful + /// active close. + cause: Option, + }, + /// A connection to the given peer has been opened. + ConnectionEstablished { + /// Identity of the peer that we have connected to. + peer_id: PeerId, + /// Identifier of the connection. + connection_id: ConnectionId, + /// Endpoint of the connection that has been opened. + endpoint: ConnectedPoint, + /// Number of established connections to this peer, including the one that has just been + /// opened. + num_established: std::num::NonZeroU32, + /// [`Some`] when the new connection is an outgoing connection. + /// Addresses are dialed concurrently. Contains the addresses and errors + /// of dial attempts that failed before the one successful dial. + concurrent_dial_errors: Option)>>, + /// How long it took to establish this connection + established_in: std::time::Duration, + }, +} + +impl TryFrom> for MallorySwarmEvent { + type Error = SwarmEvent; + + fn try_from(event: SwarmEvent) -> Result { + match event { + SwarmEvent::ListenerClosed { + listener_id, + addresses, + reason, + } => Ok(MallorySwarmEvent::ListenerClosed { + listener_id, + addresses, + reason, + }), + SwarmEvent::ListenerError { listener_id, error } => { + Ok(MallorySwarmEvent::ListenerError { listener_id, error }) + } + SwarmEvent::OutgoingConnectionError { + connection_id, + peer_id, + error, + } => Ok(MallorySwarmEvent::OutgoingConnectionError { + connection_id, + peer_id, + error, + }), + SwarmEvent::IncomingConnection { + connection_id, + local_addr, + send_back_addr, + } => Ok(MallorySwarmEvent::IncomingConnection { + connection_id, + local_addr, + send_back_addr, + }), + SwarmEvent::IncomingConnectionError { + peer_id, + connection_id, + local_addr, + send_back_addr, + error, + } => Ok(MallorySwarmEvent::IncomingConnectionError { + peer_id, + connection_id, + local_addr, + send_back_addr, + error, + }), + SwarmEvent::Dialing { + peer_id, + connection_id, + } => 
Ok(MallorySwarmEvent::Dialing { + peer_id, + connection_id, + }), + SwarmEvent::ConnectionClosed { + peer_id, + connection_id, + endpoint, + num_established, + cause, + } => Ok(MallorySwarmEvent::ConnectionClosed { + peer_id, + connection_id, + endpoint, + num_established, + cause: cause.map(|v| format!("{:?}", v)), + }), + SwarmEvent::ConnectionEstablished { + peer_id, + connection_id, + endpoint, + num_established, + concurrent_dial_errors, + established_in, + } => Ok(MallorySwarmEvent::ConnectionEstablished { + peer_id, + connection_id, + endpoint, + num_established, + concurrent_dial_errors, + established_in, + }), + ev => Err(ev), // Don't pass other events up. + } + } +} +// Used for Mallory diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ea2c53a07f..faa641dc1a 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -22,8 +22,8 @@ use crate::{Enr, NetworkGlobals, PubsubMessage, TopicHash, metrics}; use api_types::{AppRequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ - IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, - TopicScoreParams, + Config as GossipsubConfig, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, + MessageId, PublishError, RawMessage, TopicScoreParams, }; use gossipsub_scoring_parameters::{PeerScoreSettings, lighthouse_gossip_thresholds}; use libp2p::multiaddr::{self, Multiaddr, Protocol as MProtocol}; @@ -48,7 +48,9 @@ use utils::{Context as ServiceContext, build_transport, strip_peer_id}; pub mod api_types; mod gossip_cache; pub mod gossipsub_scoring_parameters; +mod mallory; pub mod utils; +pub use mallory::*; /// The number of peers we target per subnet for discovery queries. 
pub const TARGET_SUBNET_PEERS: usize = 3; @@ -104,6 +106,10 @@ pub enum NetworkEvent { ZeroListeners, /// A peer has an updated custody group count from MetaData. PeerUpdatedCustodyGroupCount(PeerId), + /// Mallory: Identify has been received. + IdentifyReceived(PeerId), + /// Mallory: Pass swarm events to mallory to handle + MallorySwarmEvent(MallorySwarmEvent), } pub type Gossipsub = gossipsub::Behaviour; @@ -111,7 +117,7 @@ pub type SubscriptionFilter = gossipsub::MaxCountSubscriptionFilter; #[derive(NetworkBehaviour)] -pub(crate) struct Behaviour +pub struct Behaviour where E: EthSpec, { @@ -145,7 +151,7 @@ where /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. pub struct Network { - swarm: libp2p::swarm::Swarm>, + pub swarm: libp2p::swarm::Swarm>, /* Auxiliary Fields */ /// A collections of variables accessible outside the network service. network_globals: Arc>, @@ -160,9 +166,11 @@ pub struct Network { score_settings: PeerScoreSettings, /// The interval for updating gossipsub scores update_gossipsub_scores: tokio::time::Interval, - gossip_cache: GossipCache, + pub gossip_cache: GossipCache, /// This node's PeerId. pub local_peer_id: PeerId, + /// Mallory specific. User handles the ping requests. + user_handle_ping: bool, } /// Implements the combined behaviour for the libp2p service. @@ -171,11 +179,12 @@ impl Network { executor: task_executor::TaskExecutor, mut ctx: ServiceContext<'_>, custody_group_count: u64, + gs_config: Option, ) -> Result<(Self, Arc>), String> { let config = ctx.config.clone(); trace!("Libp2p Service starting"); // initialise the node's ID - let local_keypair = utils::load_private_key(&config); + let local_keypair = ctx.keypair; // Trusted peers will also be marked as explicit in GossipSub. // Cfr. 
https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#explicit-peering-agreements @@ -228,14 +237,17 @@ impl Network { message_domain_valid_snappy: ctx.chain_spec.message_domain_valid_snappy, gossipsub_max_transmit_size: ctx.chain_spec.max_message_size(), }; - let gs_config = gossipsub_config( - config.network_load, - ctx.fork_context.clone(), - gossipsub_config_params, - ctx.chain_spec.seconds_per_slot, - E::slots_per_epoch(), - config.idontwant_message_size_threshold, - ); + let gs_config = match gs_config { + Some(config) => config, + None => gossipsub_config( + config.network_load, + ctx.fork_context.clone(), + gossipsub_config_params, + ctx.chain_spec.seconds_per_slot, + E::slots_per_epoch(), + config.idontwant_message_size_threshold, + ), + }; let score_settings = PeerScoreSettings::new(&ctx.chain_spec, gs_config.mesh_n()); @@ -373,6 +385,7 @@ impl Network { config.inbound_rate_limiter_config.clone(), config.outbound_rate_limiter_config.clone(), seq_number, + &config.attacker_config, ); let discovery = { @@ -411,20 +424,35 @@ impl Network { quic_enabled: !config.disable_quic_support, metrics_enabled: config.metrics_enabled, target_peer_count: config.target_peers, + ping_interval_inbound: config + .attacker_config + .inbound_peers_ping + .unwrap_or(crate::peer_manager::config::DEFAULT_PING_INTERVAL_INBOUND), + ping_interval_outbound: config + .attacker_config + .outbound_peers_ping + .unwrap_or(crate::peer_manager::config::DEFAULT_PING_INTERVAL_OUTBOUND), + status_interval: config + .attacker_config + .status_interval + .unwrap_or(crate::peer_manager::config::DEFAULT_STATUS_INTERVAL), ..Default::default() }; PeerManager::new(peer_manager_cfg, network_globals.clone())? 
}; + let max_incomming = if let Some(connections) = ctx.incoming_connections.as_ref() { + *connections + } else { + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) + .ceil() as u32 + }; + let connection_limits = { let limits = libp2p::connection_limits::ConnectionLimits::default() - .with_max_pending_incoming(Some(5)) + .with_max_pending_incoming(Some(max_incomming)) .with_max_pending_outgoing(Some(16)) - .with_max_established_incoming(Some( - (config.target_peers as f32 - * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) - .ceil() as u32, - )) + .with_max_established_incoming(Some(max_incomming)) .with_max_established_outgoing(Some( (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, )) @@ -508,6 +536,7 @@ impl Network { update_gossipsub_scores, gossip_cache, local_peer_id, + user_handle_ping: config.attacker_config.user_handle_ping, }; network.start(&config).await?; @@ -1184,7 +1213,7 @@ impl Network { } /// Sends a METADATA request to a peer. 
- fn send_meta_data_request(&mut self, peer_id: PeerId) { + pub fn send_meta_data_request(&mut self, peer_id: PeerId) { let event = if self.fork_context.spec.is_peer_das_scheduled() { // Nodes with higher custody will probably start advertising it // before peerdas is activated @@ -1456,9 +1485,24 @@ impl Network { /* Behaviour managed protocols: Ping and Metadata */ RequestType::Ping(ping) => { // inform the peer manager and send the response + if self.user_handle_ping { + return Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }); + } self.peer_manager_mut().ping_request(&peer_id, ping.data); None } + RequestType::Raw(_) => { + // inform the peer manager and send the response + return Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }); + } RequestType::MetaData(_req) => { // send the requested meta-data let metadata = self.network_globals.local_metadata.read().clone(); @@ -1700,6 +1744,7 @@ impl Network { } // send peer info to the peer manager. self.peer_manager_mut().identify(&peer_id, &info); + return Some(NetworkEvent::IdentifyReceived(peer_id)); } identify::Event::Sent { .. } => {} identify::Event::Error { .. } => {} @@ -1811,7 +1856,15 @@ impl Network { // Poll the libp2p `Swarm`. // This will poll the swarm and do maintenance routines. Some(event) = self.swarm.next() => { - if let Some(event) = self.parse_swarm_event(event) { + // Try convert to mallory event.This just passes some swarm events up to mallory, + // rather than processing here. + // Attempt passing swarm events up to Mallory + let swarm_event = match MallorySwarmEvent::try_from(event) { + Ok(ev) => return NetworkEvent::MallorySwarmEvent(ev), + Err(ev) => ev, + }; + + if let Some(event) = self.parse_swarm_event(swarm_event) { return event; } }, @@ -1838,6 +1891,20 @@ impl Network { } } + /// Publish a raw gossipsub RPC message to a specific target. 
+ pub fn publish_raw_targeted(&mut self, msg: RawMessage, target: PeerId) { + if let Err(e) = self.gossipsub_mut().raw_publish_targeted(target, msg) { + warn!("error" = ?e, "Could not publish message"); + } + } + + /// Publish a raw gossipsub RPC message to a specific target. + pub fn publish_raw(&mut self, msg: RawMessage, topic: Topic) { + if let Err(e) = self.gossipsub_mut().raw_publish(topic, msg) { + warn!("error" = ?e, "Could not publish message"); + } + } + fn parse_swarm_event( &mut self, event: SwarmEvent>, diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index a0026837e3..2c17dbcb7a 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -30,6 +30,8 @@ pub struct Context<'a> { pub fork_context: Arc, pub chain_spec: Arc, pub libp2p_registry: Option<&'a mut Registry>, + pub keypair: Keypair, + pub incoming_connections: Option, } type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; @@ -41,7 +43,7 @@ pub fn build_transport( quic_support: bool, ) -> std::io::Result { // mplex config - let mut mplex_config = libp2p_mplex::MplexConfig::new(); + let mut mplex_config = libp2p_mplex::Config::new(); mplex_config.set_max_buffer_size(256); mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block); diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index b22adfbc48..59a9e9f50e 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,7 +1,7 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; -use strum::AsRefStr; +use strum::{AsRefStr, IntoStaticStr}; use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; use crate::Subnet; @@ -145,7 +145,7 @@ pub struct GossipTopic { 
/// Enum that brings these topics into the rust type system. // NOTE: There is intentionally no unknown type here. We only allow known gossipsub topics. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, AsRefStr)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, AsRefStr, IntoStaticStr)] #[strum(serialize_all = "snake_case")] pub enum GossipKind { /// Topic for publishing beacon blocks. diff --git a/beacon_node/lighthouse_tracing/src/lib.rs b/beacon_node/lighthouse_tracing/src/lib.rs index 1787399761..18a9874252 100644 --- a/beacon_node/lighthouse_tracing/src/lib.rs +++ b/beacon_node/lighthouse_tracing/src/lib.rs @@ -26,6 +26,10 @@ pub const SPAN_PROCESS_RPC_BLOCK: &str = "process_rpc_block"; pub const SPAN_PROCESS_RPC_BLOBS: &str = "process_rpc_blobs"; pub const SPAN_PROCESS_RPC_CUSTODY_COLUMNS: &str = "process_rpc_custody_columns"; pub const SPAN_PROCESS_CHAIN_SEGMENT: &str = "process_chain_segment"; +pub const SPAN_PROCESS_CHAIN_SEGMENT_BACKFILL: &str = "process_chain_segment_backfill"; + +/// Fork choice root spans +pub const SPAN_RECOMPUTE_HEAD: &str = "recompute_head_at_slot"; /// RPC methods root spans pub const SPAN_HANDLE_BLOCKS_BY_RANGE_REQUEST: &str = "handle_blocks_by_range_request"; @@ -58,6 +62,7 @@ pub const LH_BN_ROOT_SPAN_NAMES: &[&str] = &[ SPAN_PROCESS_RPC_BLOBS, SPAN_PROCESS_RPC_CUSTODY_COLUMNS, SPAN_PROCESS_CHAIN_SEGMENT, + SPAN_PROCESS_CHAIN_SEGMENT_BACKFILL, SPAN_HANDLE_BLOCKS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 2a7fedb53e..a470271fc1 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -5,7 +5,7 @@ mod metrics; mod nat; mod network_beacon_processor; mod persisted_dht; -mod router; +pub mod router; mod status; mod subnet_service; mod sync; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs 
b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 1f1a3427e7..fa6b5fd243 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -34,7 +34,6 @@ use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; -use tokio::sync::mpsc::error::TrySendError; use tracing::{Instrument, Span, debug, error, info, instrument, trace, warn}; use types::{ Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar, @@ -709,6 +708,7 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::InvalidKzgProof { .. } | GossipDataColumnError::UnexpectedDataColumn | GossipDataColumnError::InvalidColumnIndex(_) + | GossipDataColumnError::MaxBlobsPerBlockExceeded { .. } | GossipDataColumnError::InconsistentCommitmentsLength { .. } | GossipDataColumnError::InconsistentProofsLength { .. } | GossipDataColumnError::NotFinalizedDescendant { .. } => { @@ -1054,36 +1054,43 @@ impl NetworkBeaconProcessor { "Processed data column, waiting for other components" ); - // Instead of triggering reconstruction immediately, schedule it to be run. If - // another column arrives it either completes availability or pushes - // reconstruction back a bit. - let cloned_self = Arc::clone(self); - let block_root = *block_root; - let send_result = self.beacon_processor_send.try_send(WorkEvent { - drop_during_sync: false, - work: Work::Reprocess(ReprocessQueueMessage::DelayColumnReconstruction( - QueuedColumnReconstruction { - block_root, - slot: *slot, - process_fn: Box::pin(async move { - cloned_self - .attempt_data_column_reconstruction(block_root, true) - .await; - }), - }, - )), - }); - if let Err(TrySendError::Full(WorkEvent { - work: - Work::Reprocess(ReprocessQueueMessage::DelayColumnReconstruction( - reconstruction, - )), - .. 
- })) = send_result + if self + .chain + .data_availability_checker + .custody_context() + .should_attempt_reconstruction( + slot.epoch(T::EthSpec::slots_per_epoch()), + &self.chain.spec, + ) { - warn!("Unable to send reconstruction to reprocessing"); - // Execute it immediately instead. - reconstruction.process_fn.await; + // Instead of triggering reconstruction immediately, schedule it to be run. If + // another column arrives, it either completes availability or pushes + // reconstruction back a bit. + let cloned_self = Arc::clone(self); + let block_root = *block_root; + + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess( + ReprocessQueueMessage::DelayColumnReconstruction( + QueuedColumnReconstruction { + block_root, + slot: *slot, + process_fn: Box::pin(async move { + cloned_self + .attempt_data_column_reconstruction(block_root) + .await; + }), + }, + ), + ), + }) + .is_err() + { + warn!("Unable to send reconstruction to reprocessing"); + } } } }, @@ -1494,11 +1501,12 @@ impl NetworkBeaconProcessor { let result = self .chain - .process_block_with_early_caching( + .process_block( block_root, verified_block, - BlockImportSource::Gossip, NotifyExecutionLayer::Yes, + BlockImportSource::Gossip, + || Ok(()), ) .await; register_process_result_metrics(&result, metrics::BlockSource::Gossip, "block"); diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 73349cd431..85ccde1d59 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -6,9 +6,7 @@ use beacon_chain::data_column_verification::{GossipDataColumnError, observe_goss use beacon_chain::fetch_blobs::{ EngineGetBlobsOutput, FetchEngineBlobError, fetch_and_process_engine_blobs, }; -use beacon_chain::{ - AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, NotifyExecutionLayer, -}; +use 
beacon_chain::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError}; use beacon_processor::{ BeaconProcessorSend, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, @@ -28,7 +26,7 @@ use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, error::TrySendError}; -use tracing::{debug, error, trace, warn}; +use tracing::{debug, error, instrument, trace, warn}; use types::*; pub use sync_methods::ChainSegmentProcessId; @@ -500,33 +498,23 @@ impl NetworkBeaconProcessor { process_id: ChainSegmentProcessId, blocks: Vec>, ) -> Result<(), Error> { - let is_backfill = matches!(&process_id, ChainSegmentProcessId::BackSyncBatchId { .. }); debug!(blocks = blocks.len(), id = ?process_id, "Batch sending for process"); - let processor = self.clone(); - let process_fn = async move { - let notify_execution_layer = if processor - .network_globals - .sync_state - .read() - .is_syncing_finalized() - { - NotifyExecutionLayer::No - } else { - NotifyExecutionLayer::Yes - }; - processor - .process_chain_segment(process_id, blocks, notify_execution_layer) - .await; - }; - let process_fn = Box::pin(process_fn); // Back-sync batches are dispatched with a different `Work` variant so // they can be rate-limited. 
- let work = if is_backfill { - Work::ChainSegmentBackfill(process_fn) - } else { - Work::ChainSegment(process_fn) + let work = match process_id { + ChainSegmentProcessId::RangeBatchId(_, _) => { + let process_fn = async move { + processor.process_chain_segment(process_id, blocks).await; + }; + Work::ChainSegment(Box::pin(process_fn)) + } + ChainSegmentProcessId::BackSyncBatchId(_) => { + let process_fn = + move || processor.process_chain_segment_backfill(process_id, blocks); + Work::ChainSegmentBackfill(Box::new(process_fn)) + } }; self.try_send(BeaconWorkEvent { @@ -825,36 +813,15 @@ impl NetworkBeaconProcessor { } } - /// Attempt to reconstruct all data columns if the following conditions satisfies: - /// - Our custody requirement is all columns - /// - We have >= 50% of columns, but not all columns - /// - /// Returns `Some(AvailabilityProcessingStatus)` if reconstruction is successfully performed, - /// otherwise returns `None`. - /// - /// The `publish_columns` parameter controls whether reconstructed columns should be published - /// to the gossip network. - async fn attempt_data_column_reconstruction( - self: &Arc, - block_root: Hash256, - publish_columns: bool, - ) -> Option { - // Only supernodes attempt reconstruction - if !self - .chain - .data_availability_checker - .custody_context() - .current_is_supernode - { - return None; - } - + /// Attempts to reconstruct all data columns if the conditions checked in + /// [`DataAvailabilityCheckerInner::check_and_set_reconstruction_started`] are satisfied. 
+ #[instrument(level = "debug", skip_all, fields(?block_root))] + async fn attempt_data_column_reconstruction(self: &Arc, block_root: Hash256) { let result = self.chain.reconstruct_data_columns(block_root).await; + match result { Ok(Some((availability_processing_status, data_columns_to_publish))) => { - if publish_columns { - self.publish_data_columns_gradually(data_columns_to_publish, block_root); - } + self.publish_data_columns_gradually(data_columns_to_publish, block_root); match &availability_processing_status { AvailabilityProcessingStatus::Imported(hash) => { debug!( @@ -867,21 +834,18 @@ impl NetworkBeaconProcessor { AvailabilityProcessingStatus::MissingComponents(_, _) => { debug!( result = "imported all custody columns", - block_hash = %block_root, + %block_root, "Block components still missing block after reconstruction" ); } } - - Some(availability_processing_status) } Ok(None) => { // reason is tracked via the `KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL` metric trace!( - block_hash = %block_root, + %block_root, "Reconstruction not required for block" ); - None } Err(e) => { error!( @@ -889,7 +853,6 @@ impl NetworkBeaconProcessor { error = ?e, "Error during data column reconstruction" ); - None } } } @@ -978,6 +941,7 @@ impl NetworkBeaconProcessor { /// by some nodes on the network as soon as possible. Our hope is that some columns arrive from /// other nodes in the meantime, obviating the need for us to publish them. If no other /// publisher exists for a column, it will eventually get published here. 
+ #[instrument(level="debug", skip_all, fields(?block_root, data_column_count=data_columns_to_publish.len()))] fn publish_data_columns_gradually( self: &Arc, mut data_columns_to_publish: DataColumnSidecarList, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 9ddba86b81..58e02ffe00 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -437,12 +437,12 @@ impl NetworkBeaconProcessor { } } Err(e) => { - // TODO(das): lower log level when feature is stabilized - error!( + // The node is expected to be able to serve these columns, but it fails to retrieve them. + warn!( block_root = ?data_column_ids_by_root.block_root, %peer_id, error = ?e, - "Error getting data column" + "Error getting data column for by root request " ); return Err((RpcErrorResponse::ServerError, "Error getting data column")); } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index f24495cc54..1d99540c29 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -19,9 +19,10 @@ use beacon_processor::{ use beacon_processor::{Work, WorkEvent}; use lighthouse_network::PeerAction; use lighthouse_tracing::{ - SPAN_PROCESS_CHAIN_SEGMENT, SPAN_PROCESS_RPC_BLOBS, SPAN_PROCESS_RPC_BLOCK, - SPAN_PROCESS_RPC_CUSTODY_COLUMNS, + SPAN_PROCESS_CHAIN_SEGMENT, SPAN_PROCESS_CHAIN_SEGMENT_BACKFILL, SPAN_PROCESS_RPC_BLOBS, + SPAN_PROCESS_RPC_BLOCK, SPAN_PROCESS_RPC_CUSTODY_COLUMNS, }; +use logging::crit; use std::sync::Arc; use std::time::Duration; use store::KzgCommitment; @@ -167,11 +168,12 @@ impl NetworkBeaconProcessor { let signed_beacon_block = block.block_cloned(); let result = self .chain - .process_block_with_early_caching( + 
.process_block( block_root, block, - BlockImportSource::Lookup, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), ) .await; register_process_result_metrics(&result, metrics::BlockSource::Rpc, "block"); @@ -331,14 +333,8 @@ impl NetworkBeaconProcessor { "Blobs have already been imported" ); } - Err(e) => { - warn!( - error = ?e, - block_hash = %block_root, - %slot, - "Error when importing rpc blobs" - ); - } + // Errors are handled and logged in `block_lookups` + Err(_) => {} } // Sync handles these results @@ -383,7 +379,7 @@ impl NetworkBeaconProcessor { "RPC custody data columns received" ); - let mut result = self + let result = self .chain .process_rpc_custody_columns(custody_columns) .await; @@ -404,17 +400,6 @@ impl NetworkBeaconProcessor { block_hash = %block_root, "Missing components over rpc" ); - // Attempt reconstruction here before notifying sync, to avoid sending out more requests - // that we may no longer need. - // We don't publish columns reconstructed from rpc columns to the gossip network, - // as these are likely historic columns. 
- let publish_columns = false; - if let Some(availability) = self - .attempt_data_column_reconstruction(block_root, publish_columns) - .await - { - result = Ok(availability) - } } }, Err(BlockError::DuplicateFullyImported(_)) => { @@ -423,13 +408,8 @@ impl NetworkBeaconProcessor { "Custody columns have already been imported" ); } - Err(e) => { - warn!( - error = ?e, - block_hash = %block_root, - "Error when importing rpc custody columns" - ); - } + // Errors are handled and logged in `block_lookups` + Err(_) => {} } self.send_sync_message(SyncMessage::BlockComponentProcessed { @@ -445,27 +425,42 @@ impl NetworkBeaconProcessor { parent = None, level = "debug", skip_all, - fields(sync_type = ?sync_type, downloaded_blocks = downloaded_blocks.len()) + fields(process_id = ?process_id, downloaded_blocks = downloaded_blocks.len()) )] pub async fn process_chain_segment( &self, - sync_type: ChainSegmentProcessId, + process_id: ChainSegmentProcessId, downloaded_blocks: Vec>, - notify_execution_layer: NotifyExecutionLayer, ) { - let result = match sync_type { - // this a request from the range sync - ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { - let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); - let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); - let sent_blocks = downloaded_blocks.len(); + let ChainSegmentProcessId::RangeBatchId(chain_id, epoch) = process_id else { + // This is a request from range sync, this should _never_ happen + crit!( + error = "process_chain_segment called on a variant other than RangeBatchId", + "Please notify the devs" + ); + return; + }; - match self - .process_blocks(downloaded_blocks.iter(), notify_execution_layer) - .await - { - (imported_blocks, Ok(_)) => { - debug!( + let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); + let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); + let sent_blocks = downloaded_blocks.len(); + let notify_execution_layer = if self + 
.network_globals + .sync_state + .read() + .is_syncing_finalized() + { + NotifyExecutionLayer::No + } else { + NotifyExecutionLayer::Yes + }; + + let result = match self + .process_blocks(downloaded_blocks.iter(), notify_execution_layer) + .await + { + (imported_blocks, Ok(_)) => { + debug!( batch_epoch = %epoch, first_block_slot = start_slot, chain = chain_id, @@ -473,13 +468,13 @@ impl NetworkBeaconProcessor { processed_blocks = sent_blocks, service= "sync", "Batch processed"); - BatchProcessResult::Success { - sent_blocks, - imported_blocks, - } - } - (imported_blocks, Err(e)) => { - debug!( + BatchProcessResult::Success { + sent_blocks, + imported_blocks, + } + } + (imported_blocks, Err(e)) => { + debug!( batch_epoch = %epoch, first_block_slot = start_slot, chain = chain_id, @@ -488,33 +483,61 @@ impl NetworkBeaconProcessor { error = %e.message, service = "sync", "Batch processing failed"); - match e.peer_action { - Some(penalty) => BatchProcessResult::FaultyFailure { - imported_blocks, - penalty, - }, - None => BatchProcessResult::NonFaultyFailure, - } - } + match e.peer_action { + Some(penalty) => BatchProcessResult::FaultyFailure { + imported_blocks, + penalty, + }, + None => BatchProcessResult::NonFaultyFailure, } } - // this a request from the Backfill sync - ChainSegmentProcessId::BackSyncBatchId(epoch) => { - let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); - let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); - let sent_blocks = downloaded_blocks.len(); - let n_blobs = downloaded_blocks - .iter() - .map(|wrapped| wrapped.n_blobs()) - .sum::(); - let n_data_columns = downloaded_blocks - .iter() - .map(|wrapped| wrapped.n_data_columns()) - .sum::(); + }; - match self.process_backfill_blocks(downloaded_blocks) { - (imported_blocks, Ok(_)) => { - debug!( + self.send_sync_message(SyncMessage::BatchProcessed { + sync_type: process_id, + result, + }); + } + + /// Attempt to import the chain segment (`blocks`) to the beacon 
chain, informing the sync + /// thread if more blocks are needed to process it. + #[instrument( + name = SPAN_PROCESS_CHAIN_SEGMENT_BACKFILL, + parent = None, + level = "debug", + skip_all, + fields(downloaded_blocks = downloaded_blocks.len()) + )] + pub fn process_chain_segment_backfill( + &self, + process_id: ChainSegmentProcessId, + downloaded_blocks: Vec>, + ) { + let ChainSegmentProcessId::BackSyncBatchId(epoch) = process_id else { + // this a request from RangeSync, this should _never_ happen + crit!( + error = + "process_chain_segment_backfill called on a variant other than BackSyncBatchId", + "Please notify the devs" + ); + return; + }; + + let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); + let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); + let sent_blocks = downloaded_blocks.len(); + let n_blobs = downloaded_blocks + .iter() + .map(|wrapped| wrapped.n_blobs()) + .sum::(); + let n_data_columns = downloaded_blocks + .iter() + .map(|wrapped| wrapped.n_data_columns()) + .sum::(); + + let result = match self.process_backfill_blocks(downloaded_blocks) { + (imported_blocks, Ok(_)) => { + debug!( batch_epoch = %epoch, first_block_slot = start_slot, keep_execution_payload = !self.chain.store.get_config().prune_payloads, @@ -524,34 +547,35 @@ impl NetworkBeaconProcessor { processed_data_columns = n_data_columns, service= "sync", "Backfill batch processed"); - BatchProcessResult::Success { - sent_blocks, - imported_blocks, - } - } - (_, Err(e)) => { - debug!( - batch_epoch = %epoch, - first_block_slot = start_slot, - last_block_slot = end_slot, - processed_blobs = n_blobs, - error = %e.message, - service = "sync", - "Backfill batch processing failed" - ); - match e.peer_action { - Some(penalty) => BatchProcessResult::FaultyFailure { - imported_blocks: 0, - penalty, - }, - None => BatchProcessResult::NonFaultyFailure, - } - } + BatchProcessResult::Success { + sent_blocks, + imported_blocks, + } + } + (_, Err(e)) => { + debug!( 
+ batch_epoch = %epoch, + first_block_slot = start_slot, + last_block_slot = end_slot, + processed_blobs = n_blobs, + error = %e.message, + service = "sync", + "Backfill batch processing failed" + ); + match e.peer_action { + Some(penalty) => BatchProcessResult::FaultyFailure { + imported_blocks: 0, + penalty, + }, + None => BatchProcessResult::NonFaultyFailure, } } }; - self.send_sync_message(SyncMessage::BatchProcessed { sync_type, result }); + self.send_sync_message(SyncMessage::BatchProcessed { + sync_type: process_id, + result, + }); } /// Helper function to process blocks batches which only consumes the chain and blocks to process. diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 2935c2d213..4137c974bf 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -458,10 +458,10 @@ impl TestRig { .unwrap(); } - pub fn enqueue_backfill_batch(&self) { + pub fn enqueue_backfill_batch(&self, epoch: Epoch) { self.network_beacon_processor .send_chain_segment( - ChainSegmentProcessId::BackSyncBatchId(Epoch::default()), + ChainSegmentProcessId::BackSyncBatchId(epoch), Vec::default(), ) .unwrap(); @@ -606,7 +606,7 @@ impl TestRig { } pub async fn assert_event_journal(&mut self, expected: &[&str]) { - self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT) + self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT, false, false) .await } @@ -623,6 +623,8 @@ impl TestRig { .chain(std::iter::once(NOTHING_TO_DO)) .collect::>(), timeout, + false, + false, ) .await } @@ -666,11 +668,21 @@ impl TestRig { &mut self, expected: &[&str], timeout: Duration, + ignore_worker_freed: bool, + ignore_nothing_to_do: bool, ) { let mut events = Vec::with_capacity(expected.len()); let drain_future = async { while let Some(event) = self.work_journal_rx.recv().await { + if event == WORKER_FREED && 
ignore_worker_freed { + continue; + } + + if event == NOTHING_TO_DO && ignore_nothing_to_do { + continue; + } + events.push(event); // Break as soon as we collect the desired number of events. @@ -1009,10 +1021,6 @@ async fn import_gossip_block_acceptably_early() { rig.assert_event_journal_completes(&[WorkType::GossipDataColumnSidecar]) .await; } - if num_data_columns > 0 { - rig.assert_event_journal_completes(&[WorkType::ColumnReconstruction]) - .await; - } // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for @@ -1388,6 +1396,8 @@ async fn requeue_unknown_block_gossip_attestation_without_import() { NOTHING_TO_DO, ], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, + false, + false, ) .await; @@ -1428,6 +1438,8 @@ async fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { NOTHING_TO_DO, ], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, + false, + false, ) .await; @@ -1562,8 +1574,8 @@ async fn test_backfill_sync_processing() { // (not straight forward to manipulate `TestingSlotClock` due to cloning of `SlotClock` in code) // and makes the test very slow, hence timing calculation is unit tested separately in // `work_reprocessing_queue`. - for _ in 0..1 { - rig.enqueue_backfill_batch(); + for i in 0..1 { + rig.enqueue_backfill_batch(Epoch::new(i)); // ensure queued batch is not processed until later rig.assert_no_events_for(Duration::from_millis(100)).await; // A new batch should be processed within a slot. 
@@ -1574,6 +1586,8 @@ async fn test_backfill_sync_processing() { NOTHING_TO_DO, ], rig.chain.slot_clock.slot_duration(), + false, + false, ) .await; } @@ -1594,8 +1608,8 @@ async fn test_backfill_sync_processing_rate_limiting_disabled() { ) .await; - for _ in 0..3 { - rig.enqueue_backfill_batch(); + for i in 0..3 { + rig.enqueue_backfill_batch(Epoch::new(i)); } // ensure all batches are processed @@ -1606,6 +1620,8 @@ async fn test_backfill_sync_processing_rate_limiting_disabled() { WorkType::ChainSegmentBackfill.into(), ], Duration::from_millis(100), + true, + true, ) .await; } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 60fe094bb7..e17c54ead3 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -313,6 +313,7 @@ impl Router { Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) | Response::LightClientFinalityUpdate(_) + | Response::Ping(_) | Response::LightClientUpdatesByRange(_) => unreachable!(), } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index c97206ea87..72d071f09f 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -267,6 +267,10 @@ impl NetworkService { &beacon_chain.spec, )); + debug!(fork_name = ?fork_context.current_fork_name(), "Current fork"); + + let keypair = lighthouse_network::load_private_key(&config); + // construct the libp2p service context let service_context = Context { config: config.clone(), @@ -274,6 +278,8 @@ impl NetworkService { fork_context: fork_context.clone(), chain_spec: beacon_chain.spec.clone(), libp2p_registry, + keypair, + incoming_connections: None, }; // launch libp2p service @@ -284,6 +290,7 @@ impl NetworkService { .data_availability_checker .custody_context() .custody_group_count_at_head(&beacon_chain.spec), + None, ) .await?; @@ -474,6 +481,11 @@ impl NetworkService { shutdown_sender: &mut Sender, ) { match ev { + // mallory event + 
NetworkEvent::MallorySwarmEvent(_) => {} + // mallory event + NetworkEvent::IdentifyReceived(_) => {} + NetworkEvent::PeerConnectedOutgoing(peer_id) => { self.send_to_router(RouterMessage::StatusPeer(peer_id)); } @@ -840,6 +852,7 @@ impl NetworkService { new_fork = ?new_fork_name, "Transitioned to new fork" ); + new_fork_name.fork_ascii(); } fork_context.update_current_fork(*new_fork_name, new_fork_digest, current_epoch); diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs deleted file mode 100644 index 0da27c6a21..0000000000 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ /dev/null @@ -1,681 +0,0 @@ -//! This service keeps track of which shard subnet the beacon node should be subscribed to at any -//! given time. It schedules subscriptions to shard subnets, requests peer discoveries and -//! determines whether attestations should be aggregated and/or passed to the beacon node. - -use super::SubnetServiceMessage; -use std::collections::HashSet; -use std::collections::{HashMap, VecDeque}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Duration; - -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use delay_map::{HashMapDelay, HashSetDelay}; -use futures::prelude::*; -use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDiscovery}; -use slot_clock::SlotClock; -use tracing::{debug, error, info, trace, warn}; -use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; - -use crate::metrics; - -/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the -/// slot is less than this number, skip the peer discovery process. -/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. -pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; -/// The fraction of a slot that we subscribe to a subnet before the required slot. 
-/// -/// Currently a whole slot ahead. -const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; - -/// The number of slots after an aggregator duty where we remove the entry from -/// `aggregate_validators_on_subnet` delay map. -const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2; - -#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] -pub(crate) enum SubscriptionKind { - /// Long lived subscriptions. - /// - /// These have a longer duration and are advertised in our ENR. - LongLived, - /// Short lived subscriptions. - /// - /// Subscribing to these subnets has a short duration and we don't advertise it in our ENR. - ShortLived, -} - -/// A particular subnet at a given slot. -#[derive(PartialEq, Eq, Hash, Clone, Debug, Copy)] -pub struct ExactSubnet { - /// The `SubnetId` associated with this subnet. - pub subnet_id: SubnetId, - /// The `Slot` associated with this subnet. - pub slot: Slot, -} - -pub struct AttestationService { - /// Queued events to return to the driving service. - events: VecDeque, - - /// A reference to the beacon chain to process received attestations. - pub(crate) beacon_chain: Arc>, - - /// Subnets we are currently subscribed to as short lived subscriptions. - /// - /// Once they expire, we unsubscribe from these. - /// We subscribe to subnets when we are an aggregator for an exact subnet. - short_lived_subscriptions: HashMapDelay, - - /// Subnets we are currently subscribed to as long lived subscriptions. - /// - /// We advertise these in our ENR. When these expire, the subnet is removed from our ENR. - /// These are required of all beacon nodes. The exact number is determined by the chain - /// specification. - long_lived_subscriptions: HashSet, - - /// Short lived subscriptions that need to be executed in the future. - scheduled_short_lived_subscriptions: HashSetDelay, - - /// A collection timeouts to track the existence of aggregate validator subscriptions at an - /// `ExactSubnet`. 
- aggregate_validators_on_subnet: Option>, - - /// The waker for the current thread. - waker: Option, - - /// The discovery mechanism of lighthouse is disabled. - discovery_disabled: bool, - - /// We are always subscribed to all subnets. - subscribe_all_subnets: bool, - - /// Our Discv5 node_id. - node_id: NodeId, - - /// Future used to manage subscribing and unsubscribing from long lived subnets. - next_long_lived_subscription_event: Pin>, - - /// Whether this node is a block proposer-only node. - proposer_only: bool, -} - -impl AttestationService { - /* Public functions */ - - /// Establish the service based on the passed configuration. - pub fn new(beacon_chain: Arc>, node_id: NodeId, config: &NetworkConfig) -> Self { - let slot_duration = beacon_chain.slot_clock.slot_duration(); - - if config.subscribe_all_subnets { - info!("Subscribing to all subnets"); - } else { - info!( - subnets_per_node = beacon_chain.spec.subnets_per_node, - subscription_duration_in_epochs = beacon_chain.spec.epochs_per_subnet_subscription, - "Deterministic long lived subnets enabled" - ); - } - - let track_validators = !config.import_all_attestations; - let aggregate_validators_on_subnet = - track_validators.then(|| HashSetDelay::new(slot_duration)); - let mut service = AttestationService { - events: VecDeque::with_capacity(10), - beacon_chain, - short_lived_subscriptions: HashMapDelay::new(slot_duration), - long_lived_subscriptions: HashSet::default(), - scheduled_short_lived_subscriptions: HashSetDelay::default(), - aggregate_validators_on_subnet, - waker: None, - discovery_disabled: config.disable_discovery, - subscribe_all_subnets: config.subscribe_all_subnets, - node_id, - next_long_lived_subscription_event: { - // Set a dummy sleep. 
Calculating the current subnet subscriptions will update this - // value with a smarter timing - Box::pin(tokio::time::sleep(Duration::from_secs(1))) - }, - proposer_only: config.proposer_only, - }; - - // If we are not subscribed to all subnets, handle the deterministic set of subnets - if !config.subscribe_all_subnets { - service.recompute_long_lived_subnets(); - } - - service - } - - /// Return count of all currently subscribed subnets (long-lived **and** short-lived). - #[cfg(test)] - pub fn subscription_count(&self) -> usize { - if self.subscribe_all_subnets { - self.beacon_chain.spec.attestation_subnet_count as usize - } else { - let count = self - .short_lived_subscriptions - .keys() - .chain(self.long_lived_subscriptions.iter()) - .collect::>() - .len(); - count - } - } - - /// Returns whether we are subscribed to a subnet for testing purposes. - #[cfg(test)] - pub(crate) fn is_subscribed( - &self, - subnet_id: &SubnetId, - subscription_kind: SubscriptionKind, - ) -> bool { - match subscription_kind { - SubscriptionKind::LongLived => self.long_lived_subscriptions.contains(subnet_id), - SubscriptionKind::ShortLived => self.short_lived_subscriptions.contains_key(subnet_id), - } - } - - #[cfg(test)] - pub(crate) fn long_lived_subscriptions(&self) -> &HashSet { - &self.long_lived_subscriptions - } - - /// Processes a list of validator subscriptions. - /// - /// This will: - /// - Register new validators as being known. - /// - Search for peers for required subnets. - /// - Request subscriptions for subnets on specific slots when required. - /// - Build the timeouts for each of these events. - /// - /// This returns a result simply for the ergonomics of using ?. The result can be - /// safely dropped. - pub fn validator_subscriptions( - &mut self, - subscriptions: impl Iterator, - ) -> Result<(), String> { - // If the node is in a proposer-only state, we ignore all subnet subscriptions. 
- if self.proposer_only { - return Ok(()); - } - - // Maps each subnet_id subscription to it's highest slot - let mut subnets_to_discover: HashMap = HashMap::new(); - - // Registers the validator with the attestation service. - for subscription in subscriptions { - metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS); - - trace!(?subscription, "Validator subscription"); - - // Compute the subnet that is associated with this subscription - let subnet_id = match SubnetId::compute_subnet::( - subscription.slot, - subscription.attestation_committee_index, - subscription.committee_count_at_slot, - &self.beacon_chain.spec, - ) { - Ok(subnet_id) => subnet_id, - Err(e) => { - warn!( - error = ?e, - "Failed to compute subnet id for validator subscription" - ); - continue; - } - }; - // Ensure each subnet_id inserted into the map has the highest slot as it's value. - // Higher slot corresponds to higher min_ttl in the `SubnetDiscovery` entry. - if let Some(slot) = subnets_to_discover.get(&subnet_id) { - if subscription.slot > *slot { - subnets_to_discover.insert(subnet_id, subscription.slot); - } - } else if !self.discovery_disabled { - subnets_to_discover.insert(subnet_id, subscription.slot); - } - - let exact_subnet = ExactSubnet { - subnet_id, - slot: subscription.slot, - }; - - // Determine if the validator is an aggregator. If so, we subscribe to the subnet and - // if successful add the validator to a mapping of known aggregators for that exact - // subnet. - - if subscription.is_aggregator { - metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS); - if let Err(e) = self.subscribe_to_short_lived_subnet(exact_subnet) { - warn!(error = e, "Subscription to subnet error"); - } else { - trace!(?exact_subnet, "Subscribed to subnet for aggregator duties"); - } - } - } - - // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the - // required subnets. 
- if !self.discovery_disabled { - if let Err(e) = self.discover_peers_request( - subnets_to_discover - .into_iter() - .map(|(subnet_id, slot)| ExactSubnet { subnet_id, slot }), - ) { - warn!(error = e, "Discovery lookup request error"); - }; - } - - Ok(()) - } - - fn recompute_long_lived_subnets(&mut self) { - // Ensure the next computation is scheduled even if assigning subnets fails. - let next_subscription_event = self - .recompute_long_lived_subnets_inner() - .unwrap_or_else(|_| self.beacon_chain.slot_clock.slot_duration()); - - debug!("Recomputing deterministic long lived subnets"); - self.next_long_lived_subscription_event = - Box::pin(tokio::time::sleep(next_subscription_event)); - - if let Some(waker) = self.waker.as_ref() { - waker.wake_by_ref(); - } - } - - /// Gets the long lived subnets the node should be subscribed to during the current epoch and - /// the remaining duration for which they remain valid. - fn recompute_long_lived_subnets_inner(&mut self) -> Result { - let current_epoch = self.beacon_chain.epoch().map_err(|e| { - if !self - .beacon_chain - .slot_clock - .is_prior_to_genesis() - .unwrap_or(false) - { - error!(err = ?e,"Failed to get the current epoch from clock") - } - })?; - - let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( - self.node_id.raw(), - current_epoch, - &self.beacon_chain.spec, - ) - .map_err(|e| error!(err = e, "Could not compute subnets for current epoch"))?; - - let next_subscription_slot = - next_subscription_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let next_subscription_event = self - .beacon_chain - .slot_clock - .duration_to_slot(next_subscription_slot) - .ok_or_else(|| { - error!("Failed to compute duration to next to long lived subscription event") - })?; - - self.update_long_lived_subnets(subnets.collect()); - - Ok(next_subscription_event) - } - - /// Updates the long lived subnets. 
- /// - /// New subnets are registered as subscribed, removed subnets as unsubscribed and the Enr - /// updated accordingly. - fn update_long_lived_subnets(&mut self, mut subnets: HashSet) { - info!(subnets = ?subnets.iter().collect::>(),"Subscribing to long-lived subnets"); - for subnet in &subnets { - // Add the events for those subnets that are new as long lived subscriptions. - if !self.long_lived_subscriptions.contains(subnet) { - // Check if this subnet is new and send the subscription event if needed. - if !self.short_lived_subscriptions.contains_key(subnet) { - debug!( - ?subnet, - subscription_kind = ?SubscriptionKind::LongLived, - "Subscribing to subnet" - ); - self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( - *subnet, - ))); - } - self.queue_event(SubnetServiceMessage::EnrAdd(Subnet::Attestation(*subnet))); - if !self.discovery_disabled { - self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { - subnet: Subnet::Attestation(*subnet), - min_ttl: None, - }])) - } - } - } - - // Update the long_lived_subnets set and check for subnets that are being removed - std::mem::swap(&mut self.long_lived_subscriptions, &mut subnets); - for subnet in subnets { - if !self.long_lived_subscriptions.contains(&subnet) { - self.handle_removed_subnet(subnet, SubscriptionKind::LongLived); - } - } - } - - /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip - /// verification, re-propagates and returns false. 
- pub fn should_process_attestation( - &self, - subnet: SubnetId, - attestation: &Attestation, - ) -> bool { - // Proposer-only mode does not need to process attestations - if self.proposer_only { - return false; - } - self.aggregate_validators_on_subnet - .as_ref() - .map(|tracked_vals| { - tracked_vals.contains_key(&ExactSubnet { - subnet_id: subnet, - slot: attestation.data().slot, - }) - }) - .unwrap_or(true) - } - - /* Internal private functions */ - - /// Adds an event to the event queue and notifies that this service is ready to be polled - /// again. - fn queue_event(&mut self, ev: SubnetServiceMessage) { - self.events.push_back(ev); - if let Some(waker) = &self.waker { - waker.wake_by_ref() - } - } - /// Checks if there are currently queued discovery requests and the time required to make the - /// request. - /// - /// If there is sufficient time, queues a peer discovery request for all the required subnets. - fn discover_peers_request( - &mut self, - exact_subnets: impl Iterator, - ) -> Result<(), &'static str> { - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; - - let discovery_subnets: Vec = exact_subnets - .filter_map(|exact_subnet| { - // Check if there is enough time to perform a discovery lookup. - if exact_subnet.slot - >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) - { - // Send out an event to start looking for peers. - // Require the peer for an additional slot to ensure we keep the peer for the - // duration of the subscription. - let min_ttl = self - .beacon_chain - .slot_clock - .duration_to_slot(exact_subnet.slot + 1) - .map(|duration| std::time::Instant::now() + duration); - Some(SubnetDiscovery { - subnet: Subnet::Attestation(exact_subnet.subnet_id), - min_ttl, - }) - } else { - // We may want to check the global PeerInfo to see estimated timeouts for each - // peer before they can be removed. 
- warn!( - subnet_id = ?exact_subnet, - "Not enough time for a discovery search" - ); - None - } - }) - .collect(); - - if !discovery_subnets.is_empty() { - self.queue_event(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); - } - Ok(()) - } - - // Subscribes to the subnet if it should be done immediately, or schedules it if required. - fn subscribe_to_short_lived_subnet( - &mut self, - ExactSubnet { subnet_id, slot }: ExactSubnet, - ) -> Result<(), &'static str> { - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - - // The short time we schedule the subscription before it's actually required. This - // ensures we are subscribed on time, and allows consecutive subscriptions to the same - // subnet to overlap, reducing subnet churn. - let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; - // The time to the required slot. - let time_to_subscription_slot = self - .beacon_chain - .slot_clock - .duration_to_slot(slot) - .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. - - // Calculate how long before we need to subscribe to the subnet. - let time_to_subscription_start = - time_to_subscription_slot.saturating_sub(advance_subscription_duration); - - // The time after a duty slot where we no longer need it in the `aggregate_validators_on_subnet` - // delay map. - let time_to_unsubscribe = - time_to_subscription_slot + UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY * slot_duration; - if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - tracked_vals.insert_at(ExactSubnet { subnet_id, slot }, time_to_unsubscribe); - } - - // If the subscription should be done in the future, schedule it. Otherwise subscribe - // immediately. - if time_to_subscription_start.is_zero() { - // This is a current or past slot, we subscribe immediately. - self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1)?; - } else { - // This is a future slot, schedule subscribing. 
- trace!(subnet = ?subnet_id, ?time_to_subscription_start,"Scheduling subnet subscription"); - self.scheduled_short_lived_subscriptions - .insert_at(ExactSubnet { subnet_id, slot }, time_to_subscription_start); - } - - Ok(()) - } - - /* A collection of functions that handle the various timeouts */ - - /// Registers a subnet as subscribed. - /// - /// Checks that the time in which the subscription would end is not in the past. If we are - /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send - /// out the appropriate events. - /// - /// On determinist long lived subnets, this is only used for short lived subscriptions. - fn subscribe_to_short_lived_subnet_immediately( - &mut self, - subnet_id: SubnetId, - end_slot: Slot, - ) -> Result<(), &'static str> { - if self.subscribe_all_subnets { - // Case not handled by this service. - return Ok(()); - } - - let time_to_subscription_end = self - .beacon_chain - .slot_clock - .duration_to_slot(end_slot) - .unwrap_or_default(); - - // First check this is worth doing. - if time_to_subscription_end.is_zero() { - return Err("Time when subscription would end has already passed."); - } - - let subscription_kind = SubscriptionKind::ShortLived; - - // We need to check and add a subscription for the right kind, regardless of the presence - // of the subnet as a subscription of the other kind. This is mainly since long lived - // subscriptions can be removed at any time when a validator goes offline. - - let (subscriptions, already_subscribed_as_other_kind) = ( - &mut self.short_lived_subscriptions, - self.long_lived_subscriptions.contains(&subnet_id), - ); - - match subscriptions.get(&subnet_id) { - Some(current_end_slot) => { - // We are already subscribed. Check if we need to extend the subscription. 
- if &end_slot > current_end_slot { - trace!( - subnet = ?subnet_id, - prev_end_slot = %current_end_slot, - new_end_slot = %end_slot, - ?subscription_kind, - "Extending subscription to subnet" - ); - subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); - } - } - None => { - // This is a new subscription. Add with the corresponding timeout and send the - // notification. - subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); - - // Inform of the subscription. - if !already_subscribed_as_other_kind { - debug!( - subnet = ?subnet_id, - %end_slot, - ?subscription_kind, - "Subscribing to subnet" - ); - self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( - subnet_id, - ))); - } - } - } - - Ok(()) - } - - // Unsubscribes from a subnet that was removed if it does not continue to exist as a - // subscription of the other kind. For long lived subscriptions, it also removes the - // advertisement from our ENR. - fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) { - let exists_in_other_subscriptions = match subscription_kind { - SubscriptionKind::LongLived => self.short_lived_subscriptions.contains_key(&subnet_id), - SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains(&subnet_id), - }; - - if !exists_in_other_subscriptions { - // Subscription no longer exists as short lived or long lived. - debug!( - subnet = ?subnet_id, - ?subscription_kind, - "Unsubscribing from subnet" - ); - self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - subnet_id, - ))); - } - - if subscription_kind == SubscriptionKind::LongLived { - // Remove from our ENR even if we remain subscribed in other way. 
- self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation( - subnet_id, - ))); - } - } -} - -impl Stream for AttestationService { - type Item = SubnetServiceMessage; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // Update the waker if needed. - if let Some(waker) = &self.waker { - if !waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } - - // Send out any generated events. - if let Some(event) = self.events.pop_front() { - return Poll::Ready(Some(event)); - } - - // If we aren't subscribed to all subnets, handle the deterministic long-lived subnets - if !self.subscribe_all_subnets { - match self.next_long_lived_subscription_event.as_mut().poll(cx) { - Poll::Ready(_) => { - self.recompute_long_lived_subnets(); - // We re-wake the task as there could be other subscriptions to process - self.waker - .as_ref() - .expect("Waker has been set") - .wake_by_ref(); - } - Poll::Pending => {} - } - } - - // Process scheduled subscriptions that might be ready, since those can extend a soon to - // expire subscription. - match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => { - if let Err(e) = - self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1) - { - debug!(subnet = ?subnet_id, err = e,"Failed to subscribe to short lived subnet"); - } - self.waker - .as_ref() - .expect("Waker has been set") - .wake_by_ref(); - } - Poll::Ready(Some(Err(e))) => { - error!( - error = e, - "Failed to check for scheduled subnet subscriptions" - ); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // Finally process any expired subscriptions. 
- match self.short_lived_subscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { - self.handle_removed_subnet(subnet_id, SubscriptionKind::ShortLived); - // We re-wake the task as there could be other subscriptions to process - self.waker - .as_ref() - .expect("Waker has been set") - .wake_by_ref(); - } - Poll::Ready(Some(Err(e))) => { - error!(error = e, "Failed to check for subnet unsubscription times"); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // Poll to remove entries on expiration, no need to act on expiration events. - if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { - error!( - error = e, - "Failed to check for aggregate validator on subnet expirations" - ); - } - } - - Poll::Pending - } -} diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs deleted file mode 100644 index 6b3834e195..0000000000 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ /dev/null @@ -1,345 +0,0 @@ -//! This service keeps track of which sync committee subnet the beacon node should be subscribed to at any -//! given time. It schedules subscriptions to sync committee subnets and requests peer discoveries. - -use std::collections::{hash_map::Entry, HashMap, VecDeque}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Duration; - -use futures::prelude::*; -use tracing::{debug, error, trace, warn}; - -use super::SubnetServiceMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use delay_map::HashSetDelay; -use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; -use slot_clock::SlotClock; -use types::{Epoch, EthSpec, SyncCommitteeSubscription, SyncSubnetId}; - -use crate::metrics; - -/// The minimum number of slots ahead that we attempt to discover peers for a subscription. 
If the -/// slot is less than this number, skip the peer discovery process. -/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. -const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; - -/// A particular subnet at a given slot. -#[derive(PartialEq, Eq, Hash, Clone, Debug)] -pub struct ExactSubnet { - /// The `SyncSubnetId` associated with this subnet. - pub subnet_id: SyncSubnetId, - /// The epoch until which we need to stay subscribed to the subnet. - pub until_epoch: Epoch, -} -pub struct SyncCommitteeService { - /// Queued events to return to the driving service. - events: VecDeque, - - /// A reference to the beacon chain to process received attestations. - pub(crate) beacon_chain: Arc>, - - /// The collection of all currently subscribed subnets. - subscriptions: HashMap, - - /// A collection of timeouts for when to unsubscribe from a subnet. - unsubscriptions: HashSetDelay, - - /// The waker for the current thread. - waker: Option, - - /// The discovery mechanism of lighthouse is disabled. - discovery_disabled: bool, - - /// We are always subscribed to all subnets. - subscribe_all_subnets: bool, - - /// Whether this node is a block proposer-only node. 
- proposer_only: bool, -} - -impl SyncCommitteeService { - /* Public functions */ - - pub fn new(beacon_chain: Arc>, config: &NetworkConfig) -> Self { - let spec = &beacon_chain.spec; - let epoch_duration_secs = - beacon_chain.slot_clock.slot_duration().as_secs() * T::EthSpec::slots_per_epoch(); - let default_timeout = - epoch_duration_secs.saturating_mul(spec.epochs_per_sync_committee_period.as_u64()); - - SyncCommitteeService { - events: VecDeque::with_capacity(10), - beacon_chain, - subscriptions: HashMap::new(), - unsubscriptions: HashSetDelay::new(Duration::from_secs(default_timeout)), - waker: None, - subscribe_all_subnets: config.subscribe_all_subnets, - discovery_disabled: config.disable_discovery, - proposer_only: config.proposer_only, - } - } - - /// Return count of all currently subscribed subnets. - #[cfg(test)] - pub fn subscription_count(&self) -> usize { - use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; - if self.subscribe_all_subnets { - SYNC_COMMITTEE_SUBNET_COUNT as usize - } else { - self.subscriptions.len() - } - } - - /// Processes a list of sync committee subscriptions. - /// - /// This will: - /// - Search for peers for required subnets. - /// - Request subscriptions required subnets. - /// - Build the timeouts for each of these events. - /// - /// This returns a result simply for the ergonomics of using ?. The result can be - /// safely dropped. - pub fn validator_subscriptions( - &mut self, - subscriptions: Vec, - ) -> Result<(), String> { - // A proposer-only node does not subscribe to any sync-committees - if self.proposer_only { - return Ok(()); - } - - let mut subnets_to_discover = Vec::new(); - for subscription in subscriptions { - metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS); - //NOTE: We assume all subscriptions have been verified before reaching this service - - // Registers the validator with the subnet service. - // This will subscribe to long-lived random subnets if required. 
- trace!(?subscription, "Sync committee subscription"); - - let subnet_ids = match SyncSubnetId::compute_subnets_for_sync_committee::( - &subscription.sync_committee_indices, - ) { - Ok(subnet_ids) => subnet_ids, - Err(e) => { - warn!( - error = ?e, - validator_index = subscription.validator_index, - "Failed to compute subnet id for sync committee subscription" - ); - continue; - } - }; - - for subnet_id in subnet_ids { - let exact_subnet = ExactSubnet { - subnet_id, - until_epoch: subscription.until_epoch, - }; - subnets_to_discover.push(exact_subnet.clone()); - if let Err(e) = self.subscribe_to_subnet(exact_subnet.clone()) { - warn!( - error = e, - validator_index = subscription.validator_index, - "Subscription to sync subnet error" - ); - } else { - trace!( - ?exact_subnet, - validator_index = subscription.validator_index, - "Subscribed to subnet for sync committee duties" - ); - } - } - } - // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the - // required subnets. - if !self.discovery_disabled { - if let Err(e) = self.discover_peers_request(subnets_to_discover.iter()) { - warn!(error = e, "Discovery lookup request error"); - }; - } - - // pre-emptively wake the thread to check for new events - if let Some(waker) = &self.waker { - waker.wake_by_ref(); - } - Ok(()) - } - - /* Internal private functions */ - - /// Checks if there are currently queued discovery requests and the time required to make the - /// request. - /// - /// If there is sufficient time, queues a peer discovery request for all the required subnets. 
- fn discover_peers_request<'a>( - &mut self, - exact_subnets: impl Iterator, - ) -> Result<(), &'static str> { - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; - - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - - let discovery_subnets: Vec = exact_subnets - .filter_map(|exact_subnet| { - let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); - // check if there is enough time to perform a discovery lookup - if until_slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) { - // if the slot is more than epoch away, add an event to start looking for peers - // add one slot to ensure we keep the peer for the subscription slot - let min_ttl = self - .beacon_chain - .slot_clock - .duration_to_slot(until_slot + 1) - .map(|duration| std::time::Instant::now() + duration); - Some(SubnetDiscovery { - subnet: Subnet::SyncCommittee(exact_subnet.subnet_id), - min_ttl, - }) - } else { - // We may want to check the global PeerInfo to see estimated timeouts for each - // peer before they can be removed. - warn!( - subnet_id = ?exact_subnet, - "Not enough time for a discovery search" - ); - None - } - }) - .collect(); - - if !discovery_subnets.is_empty() { - self.events - .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); - } - Ok(()) - } - - /// Adds a subscription event and an associated unsubscription event if required. - fn subscribe_to_subnet(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { - // Return if we have subscribed to all subnets - if self.subscribe_all_subnets { - return Ok(()); - } - - // Return if we already have a subscription for exact_subnet - if self.subscriptions.get(&exact_subnet.subnet_id) == Some(&exact_subnet.until_epoch) { - return Ok(()); - } - - // Return if we already have subscription set to expire later than the current request. 
- if let Some(until_epoch) = self.subscriptions.get(&exact_subnet.subnet_id) { - if *until_epoch >= exact_subnet.until_epoch { - return Ok(()); - } - } - - // initialise timing variables - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; - - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); - // Calculate the duration to the unsubscription event. - let expected_end_subscription_duration = if current_slot >= until_slot { - warn!( - %current_slot, - ?exact_subnet, - "Sync committee subscription is past expiration" - ); - return Ok(()); - } else { - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - - // the duration until we no longer need this subscription. We assume a single slot is - // sufficient. - self.beacon_chain - .slot_clock - .duration_to_slot(until_slot) - .ok_or("Unable to determine duration to unsubscription slot")? - + slot_duration - }; - - if let Entry::Vacant(e) = self.subscriptions.entry(exact_subnet.subnet_id) { - // We are not currently subscribed and have no waiting subscription, create one - debug!(subnet = *exact_subnet.subnet_id, until_epoch = ?exact_subnet.until_epoch, "Subscribing to subnet"); - e.insert(exact_subnet.until_epoch); - self.events - .push_back(SubnetServiceMessage::Subscribe(Subnet::SyncCommittee( - exact_subnet.subnet_id, - ))); - - // add the subnet to the ENR bitfield - self.events - .push_back(SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee( - exact_subnet.subnet_id, - ))); - - // add an unsubscription event to remove ourselves from the subnet once completed - self.unsubscriptions - .insert_at(exact_subnet.subnet_id, expected_end_subscription_duration); - } else { - // We are already subscribed, extend the unsubscription duration - self.unsubscriptions - .update_timeout(&exact_subnet.subnet_id, expected_end_subscription_duration); - } - - Ok(()) - } - - /// A queued 
unsubscription is ready. - fn handle_unsubscriptions(&mut self, subnet_id: SyncSubnetId) { - debug!(subnet = *subnet_id, "Unsubscribing from subnet"); - - self.subscriptions.remove(&subnet_id); - self.events - .push_back(SubnetServiceMessage::Unsubscribe(Subnet::SyncCommittee( - subnet_id, - ))); - - self.events - .push_back(SubnetServiceMessage::EnrRemove(Subnet::SyncCommittee( - subnet_id, - ))); - } -} - -impl Stream for SyncCommitteeService { - type Item = SubnetServiceMessage; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // update the waker if needed - if let Some(waker) = &self.waker { - if !waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } - - // process any un-subscription events - match self.unsubscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet), - Poll::Ready(Some(Err(e))) => { - error!(error = e, "Failed to check for subnet unsubscription times"); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // process any generated events - if let Some(event) = self.events.pop_front() { - return Poll::Ready(Some(event)); - } - - Poll::Pending - } -} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index b60c21972f..f8ffd298ca 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -59,7 +59,7 @@ mod single_block_lookup; /// reaches the maximum depth it will force trigger range sync. 
pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE; -const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; +const IGNORED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 4; /// Maximum time we allow a lookup to exist before assuming it is stuck and will never make @@ -110,8 +110,10 @@ enum Action { } pub struct BlockLookups { - /// A cache of failed chain lookups to prevent duplicate searches. - failed_chains: LRUTimeCache, + /// A cache of block roots that must be ignored for some time to prevent useless searches. For + /// example if a chain is too long, its lookup chain is dropped, and range sync is expected to + /// eventually sync those blocks + ignored_chains: LRUTimeCache, // TODO: Why not index lookups by block_root? single_block_lookups: FnvHashMap>, @@ -128,21 +130,21 @@ pub(crate) type BlockLookupSummary = (Id, Hash256, Option, Vec) impl BlockLookups { pub fn new() -> Self { Self { - failed_chains: LRUTimeCache::new(Duration::from_secs( - FAILED_CHAINS_CACHE_EXPIRY_SECONDS, + ignored_chains: LRUTimeCache::new(Duration::from_secs( + IGNORED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), } } #[cfg(test)] - pub(crate) fn insert_failed_chain(&mut self, block_root: Hash256) { - self.failed_chains.insert(block_root); + pub(crate) fn insert_ignored_chain(&mut self, block_root: Hash256) { + self.ignored_chains.insert(block_root); } #[cfg(test)] - pub(crate) fn get_failed_chains(&mut self) -> Vec { - self.failed_chains.keys().cloned().collect() + pub(crate) fn get_ignored_chains(&mut self) -> Vec { + self.ignored_chains.keys().cloned().collect() } #[cfg(test)] @@ -184,7 +186,7 @@ impl BlockLookups { self.search_parent_of_child(parent_root, block_root, &[peer_id], cx); // Only create the child lookup if the parent exists if parent_lookup_exists { - // `search_parent_of_child` ensures that parent root is not a failed chain + // `search_parent_of_child` ensures that the parent 
lookup exists so we can safely wait for it self.new_current_lookup( block_root, Some(block_component), @@ -244,8 +246,8 @@ impl BlockLookups { debug!(block_root = ?block_root_to_search, "Parent lookup chain too long"); // Searching for this parent would extend a parent chain over the max - // Insert the tip only to failed chains - self.failed_chains.insert(parent_chain.tip); + // Insert the tip only to chains to ignore + self.ignored_chains.insert(parent_chain.tip); // Note: Drop only the chain that's too long until it merges with another chain // that's not too long. Consider this attack: there's a chain of valid unknown @@ -330,12 +332,9 @@ impl BlockLookups { peers: &[PeerId], cx: &mut SyncNetworkContext, ) -> bool { - // If this block or it's parent is part of a known failed chain, ignore it. - if self.failed_chains.contains(&block_root) { - debug!(?block_root, "Block is from a past failed chain. Dropping"); - for peer_id in peers { - cx.report_peer(*peer_id, PeerAction::MidToleranceError, "failed_chain"); - } + // If this block or it's parent is part of a known ignored chain, ignore it. + if self.ignored_chains.contains(&block_root) { + debug!(?block_root, "Dropping lookup for block marked ignored"); return false; } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 36509d2563..8fb3248a87 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -219,7 +219,7 @@ impl SingleBlockLookup { // can assert that this is the correct value of `blob_kzg_commitments_count`. 
match cx.chain.get_block_process_status(&self.block_root) { BlockProcessStatus::Unknown => None, - BlockProcessStatus::NotValidated(block) + BlockProcessStatus::NotValidated(block, _) | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), } }) { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 448e784ab6..d7ba028054 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -328,13 +328,13 @@ impl SyncManager { } #[cfg(test)] - pub(crate) fn get_failed_chains(&mut self) -> Vec { - self.block_lookups.get_failed_chains() + pub(crate) fn get_ignored_chains(&mut self) -> Vec { + self.block_lookups.get_ignored_chains() } #[cfg(test)] - pub(crate) fn insert_failed_chain(&mut self, block_root: Hash256) { - self.block_lookups.insert_failed_chain(block_root); + pub(crate) fn insert_ignored_chain(&mut self, block_root: Hash256) { + self.block_lookups.insert_ignored_chain(block_root); } #[cfg(test)] diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 17a4295700..ac2991c147 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -49,8 +49,8 @@ use tokio::sync::mpsc; use tracing::{Span, debug, debug_span, error, warn}; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - BlobSidecar, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, ForkContext, - Hash256, SignedBeaconBlock, Slot, + BlobSidecar, BlockImportSource, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, + ForkContext, Hash256, SignedBeaconBlock, Slot, }; pub mod custody; @@ -835,14 +835,26 @@ impl SyncNetworkContext { match self.chain.get_block_process_status(&block_root) { // Unknown block, continue request to download BlockProcessStatus::Unknown => {} - // Block is known are currently processing, expect a future event with the result of - // 
processing. - BlockProcessStatus::NotValidated { .. } => { - // Lookup sync event safety: If the block is currently in the processing cache, we - // are guaranteed to receive a `SyncMessage::GossipBlockProcessResult` that will - // make progress on this lookup - return Ok(LookupRequestResult::Pending("block in processing cache")); - } + // Block is known and currently processing. Imports from gossip and HTTP API insert the + // block in the da_cache. However, HTTP API is unable to notify sync when it completes + // block import. Returning `Pending` here will result in stuck lookups if the block is + // importing from sync. + BlockProcessStatus::NotValidated(_, source) => match source { + BlockImportSource::Gossip => { + // Lookup sync event safety: If the block is currently in the processing cache, we + // are guaranteed to receive a `SyncMessage::GossipBlockProcessResult` that will + // make progress on this lookup + return Ok(LookupRequestResult::Pending("block in processing cache")); + } + BlockImportSource::Lookup + | BlockImportSource::RangeSync + | BlockImportSource::HttpApi => { + // Lookup, RangeSync or HttpApi block import don't emit the GossipBlockProcessResult + // event. If a lookup happens to be created during block import from one of + // those sources just import the block twice. Otherwise the lookup will get + // stuck. Double imports are fine, they just waste resources. + } + }, // Block is fully validated. If it's not yet imported it's waiting for missing block // components. Consider this request completed and do nothing. BlockProcessStatus::ExecutionValidated { .. 
} => { diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index b5bc10851d..fc64186175 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -41,8 +41,8 @@ use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; use tracing::info; use types::{ - BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, ForkName, - Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, + BeaconState, BeaconStateBase, BlobSidecar, BlockImportSource, DataColumnSidecar, EthSpec, + ForkContext, ForkName, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, data_column_sidecar::ColumnIndex, test_utils::{SeedableRng, TestRandom, XorShiftRng}, }; @@ -285,21 +285,21 @@ impl TestRig { ); } - fn insert_failed_chain(&mut self, block_root: Hash256) { - self.sync_manager.insert_failed_chain(block_root); + fn insert_ignored_chain(&mut self, block_root: Hash256) { + self.sync_manager.insert_ignored_chain(block_root); } - fn assert_not_failed_chain(&mut self, chain_hash: Hash256) { - let failed_chains = self.sync_manager.get_failed_chains(); - if failed_chains.contains(&chain_hash) { - panic!("failed chains contain {chain_hash:?}: {failed_chains:?}"); + fn assert_not_ignored_chain(&mut self, chain_hash: Hash256) { + let chains = self.sync_manager.get_ignored_chains(); + if chains.contains(&chain_hash) { + panic!("ignored chains contain {chain_hash:?}: {chains:?}"); } } - fn assert_failed_chain(&mut self, chain_hash: Hash256) { - let failed_chains = self.sync_manager.get_failed_chains(); - if !failed_chains.contains(&chain_hash) { - panic!("expected failed chains to contain {chain_hash:?}: {failed_chains:?}"); + fn assert_ignored_chain(&mut self, chain_hash: Hash256) { + let chains = self.sync_manager.get_ignored_chains(); + if !chains.contains(&chain_hash) { + panic!("expected ignored chains to contain {chain_hash:?}: {chains:?}"); } } @@ 
-1021,11 +1021,6 @@ impl TestRig { self.log(&format!("Found expected penalty {penalty_msg}")); } - pub fn expect_single_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { - self.expect_penalty(peer_id, expect_penalty_msg); - self.expect_no_penalty_for(peer_id); - } - pub fn block_with_parent_and_blobs( &mut self, parent_root: Hash256, @@ -1084,7 +1079,7 @@ impl TestRig { .harness .chain .data_availability_checker - .put_pending_executed_block(executed_block) + .put_executed_block(executed_block) .unwrap() { Availability::Available(_) => panic!("block removed from da_checker, available"), @@ -1114,20 +1109,19 @@ impl TestRig { }; } - fn insert_block_to_processing_cache(&mut self, block: Arc>) { + fn insert_block_to_availability_cache(&mut self, block: Arc>) { self.harness .chain - .reqresp_pre_import_cache - .write() - .insert(block.canonical_root(), block); + .data_availability_checker + .put_pre_execution_block(block.canonical_root(), block, BlockImportSource::Gossip) + .unwrap(); } fn simulate_block_gossip_processing_becomes_invalid(&mut self, block_root: Hash256) { self.harness .chain - .reqresp_pre_import_cache - .write() - .remove(&block_root); + .data_availability_checker + .remove_block_on_execution_error(&block_root); self.send_sync_message(SyncMessage::GossipBlockProcessResult { block_root, @@ -1140,11 +1134,6 @@ impl TestRig { block: Arc>, ) { let block_root = block.canonical_root(); - self.harness - .chain - .reqresp_pre_import_cache - .write() - .remove(&block_root); self.insert_block_to_da_checker(block); @@ -1461,7 +1450,7 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); for i in 1..=PARENT_FAIL_TOLERANCE { - rig.assert_not_failed_chain(block_root); + rig.assert_not_ignored_chain(block_root); let id = rig.expect_block_parent_request(parent_root); if i % 2 != 0 { // The request fails. It should be tried again. 
@@ -1474,8 +1463,8 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { } } - rig.assert_not_failed_chain(block_root); - rig.assert_not_failed_chain(parent.canonical_root()); + rig.assert_not_ignored_chain(block_root); + rig.assert_not_ignored_chain(parent.canonical_root()); rig.expect_no_active_lookups_empty_network(); } @@ -1500,7 +1489,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { for _ in 0..PROCESSING_FAILURES { let id = rig.expect_block_parent_request(parent_root); // Blobs are only requested in the previous first iteration as this test only retries blocks - rig.assert_not_failed_chain(block_root); + rig.assert_not_ignored_chain(block_root); // send the right parent but fail processing rig.parent_lookup_block_response(id, peer_id, Some(parent.clone().into())); rig.parent_block_processed(block_root, BlockError::BlockSlotLimitReached.into()); @@ -1508,7 +1497,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { rig.expect_penalty(peer_id, "lookup_block_processing_failure"); } - rig.assert_not_failed_chain(block_root); + rig.assert_not_ignored_chain(block_root); rig.expect_no_active_lookups_empty_network(); } @@ -1551,12 +1540,14 @@ fn test_parent_lookup_too_deep_grow_ancestor() { ); // Should not penalize peer, but network is not clear because of the blocks_by_range requests rig.expect_no_penalty_for(peer_id); - rig.assert_failed_chain(chain_hash); + rig.assert_ignored_chain(chain_hash); } // Regression test for https://github.com/sigp/lighthouse/pull/7118 +// 8042 UPDATE: block was previously added to the failed_chains cache, now it's inserted into the +// ignored chains cache. The regression test still applies as the chaild lookup is not created #[test] -fn test_child_lookup_not_created_for_failed_chain_parent_after_processing() { +fn test_child_lookup_not_created_for_ignored_chain_parent_after_processing() { // GIVEN: A parent chain longer than PARENT_DEPTH_TOLERANCE. 
let mut rig = TestRig::test_setup(); let mut blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE + 1); @@ -1586,8 +1577,8 @@ fn test_child_lookup_not_created_for_failed_chain_parent_after_processing() { } // At this point, the chain should have been deemed too deep and pruned. - // The tip root should have been inserted into failed chains. - rig.assert_failed_chain(tip_root); + // The tip root should have been inserted into ignored chains. + rig.assert_ignored_chain(tip_root); rig.expect_no_penalty_for(peer_id); // WHEN: Trigger the extending block that points to the tip. @@ -1604,10 +1595,10 @@ fn test_child_lookup_not_created_for_failed_chain_parent_after_processing() { }), ); - // THEN: The extending block should not create a lookup because the tip was inserted into failed chains. + // THEN: The extending block should not create a lookup because the tip was inserted into + // ignored chains. rig.expect_no_active_lookups(); - // AND: The peer should be penalized for extending a failed chain. 
- rig.expect_single_penalty(peer_id, "failed_chain"); + rig.expect_no_penalty_for(peer_id); rig.expect_empty_network(); } @@ -1646,7 +1637,7 @@ fn test_parent_lookup_too_deep_grow_tip() { ); // Should not penalize peer, but network is not clear because of the blocks_by_range requests rig.expect_no_penalty_for(peer_id); - rig.assert_failed_chain(tip.canonical_root()); + rig.assert_ignored_chain(tip.canonical_root()); } #[test] @@ -1699,15 +1690,14 @@ fn test_lookup_add_peers_to_parent() { } #[test] -fn test_skip_creating_failed_parent_lookup() { +fn test_skip_creating_ignored_parent_lookup() { let mut rig = TestRig::test_setup(); let (_, block, parent_root, _) = rig.rand_block_and_parent(); let peer_id = rig.new_connected_peer(); - rig.insert_failed_chain(parent_root); + rig.insert_ignored_chain(parent_root); rig.trigger_unknown_parent_block(peer_id, block.into()); - // Expect single penalty for peer, despite dropping two lookups - rig.expect_single_penalty(peer_id, "failed_chain"); - // Both current and parent lookup should be rejected + rig.expect_no_penalty_for(peer_id); + // Both current and parent lookup should not be created rig.expect_no_active_lookups(); } @@ -1845,7 +1835,7 @@ fn block_in_processing_cache_becomes_invalid() { let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); let block_root = block.canonical_root(); let peer_id = r.new_connected_peer(); - r.insert_block_to_processing_cache(block.clone().into()); + r.insert_block_to_availability_cache(block.clone().into()); r.trigger_unknown_block_from_attestation(block_root, peer_id); // Should trigger blob request let id = r.expect_blob_lookup_request(block_root); @@ -1871,7 +1861,7 @@ fn block_in_processing_cache_becomes_valid_imported() { let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); let block_root = block.canonical_root(); let peer_id = r.new_connected_peer(); - r.insert_block_to_processing_cache(block.clone().into()); + 
r.insert_block_to_availability_cache(block.clone().into()); r.trigger_unknown_block_from_attestation(block_root, peer_id); // Should trigger blob request let id = r.expect_blob_lookup_request(block_root); diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index dd01f568fa..24e2cfbbb5 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -457,32 +457,35 @@ impl OperationPool { .collect() } - /// Prune proposer slashings for validators which are exited in the finalized epoch. - pub fn prune_proposer_slashings(&self, head_state: &BeaconState) { + /// Prune proposer slashings for validators which are already slashed or exited in the finalized + /// epoch. + pub fn prune_proposer_slashings(&self, finalized_state: &BeaconState) { prune_validator_hash_map( &mut self.proposer_slashings.write(), - |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, - head_state, + |_, validator| { + validator.slashed || validator.exit_epoch <= finalized_state.current_epoch() + }, + finalized_state, ); } /// Prune attester slashings for all slashed or withdrawn validators, or attestations on another /// fork. - pub fn prune_attester_slashings(&self, head_state: &BeaconState) { + pub fn prune_attester_slashings(&self, finalized_state: &BeaconState) { self.attester_slashings.write().retain(|slashing| { // Check that the attestation's signature is still valid wrt the fork version. - let signature_ok = slashing.signature_is_still_valid(&head_state.fork()); + // We might be a bit slower to detect signature staleness by using the finalized state + // here, but we filter when proposing anyway, so in the worst case we just keep some + // stuff around until we finalize. + let signature_ok = slashing.signature_is_still_valid(&finalized_state.fork()); // Slashings that don't slash any validators can also be dropped. 
let slashing_ok = get_slashable_indices_modular( - head_state, + finalized_state, slashing.as_inner().to_ref(), |_, validator| { - // Declare that a validator is still slashable if they have not exited prior - // to the finalized epoch. - // - // We cannot check the `slashed` field since the `head` is not finalized and - // a fork could un-slash someone. - validator.exit_epoch > head_state.finalized_checkpoint().epoch + // Declare that a validator is still slashable if they have not been slashed in + // the finalized state, and have not exited at the finalized epoch. + !validator.slashed && validator.exit_epoch > finalized_state.current_epoch() }, ) .is_ok_and(|indices| !indices.is_empty()); @@ -531,17 +534,12 @@ impl OperationPool { ) } - /// Prune if validator has already exited at or before the finalized checkpoint of the head. - pub fn prune_voluntary_exits(&self, head_state: &BeaconState) { + /// Prune if validator has already exited in the finalized state. + pub fn prune_voluntary_exits(&self, finalized_state: &BeaconState, spec: &ChainSpec) { prune_validator_hash_map( &mut self.voluntary_exits.write(), - // This condition is slightly too loose, since there will be some finalized exits that - // are missed here. - // - // We choose simplicity over the gain of pruning more exits since they are small and - // should not be seen frequently. 
- |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, - head_state, + |_, validator| validator.exit_epoch != spec.far_future_epoch, + finalized_state, ); } @@ -642,14 +640,15 @@ impl OperationPool { &self, head_block: &SignedBeaconBlock, head_state: &BeaconState, + finalized_state: &BeaconState, current_epoch: Epoch, spec: &ChainSpec, ) { self.prune_attestations(current_epoch); self.prune_sync_contributions(head_state.slot()); - self.prune_proposer_slashings(head_state); - self.prune_attester_slashings(head_state); - self.prune_voluntary_exits(head_state); + self.prune_proposer_slashings(finalized_state); + self.prune_attester_slashings(finalized_state); + self.prune_voluntary_exits(finalized_state, spec); self.prune_bls_to_execution_changes(head_block, head_state, spec); } @@ -758,14 +757,14 @@ where fn prune_validator_hash_map( map: &mut HashMap>, prune_if: F, - head_state: &BeaconState, + state: &BeaconState, ) where F: Fn(u64, &Validator) -> bool, T: VerifyOperation, { map.retain(|&validator_index, op| { - op.signature_is_still_valid(&head_state.fork()) - && head_state + op.signature_is_still_valid(&state.fork()) + && state .validators() .get(validator_index as usize) .is_none_or(|validator| !prune_if(validator_index, validator)) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 386eb721a0..2e3b3fde4b 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -47,16 +47,17 @@ pub fn cli_app() -> Command { * Network parameters. */ .arg( - Arg::new("subscribe-all-data-column-subnets") - .long("subscribe-all-data-column-subnets") + Arg::new("supernode") + .long("supernode") + .alias("subscribe-all-data-column-subnets") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) - .help("Subscribe to all data column subnets and participate in data custody for \ - all columns. This will also advertise the beacon node as being long-lived \ - subscribed to all data column subnets. 
\ - NOTE: this is an experimental flag and may change any time without notice!") + .help("Run as a voluntary supernode. This node will subscribe to all data column \ + subnets, custody all data columns, and perform reconstruction and cross-seeding. \ + This requires significantly more bandwidth, storage, and computation requirements but \ + the node will have direct access to all blobs via the beacon API and it \ + helps network resilience by serving all data columns to syncing peers.") .display_order(0) - .hide(true) ) .arg( // TODO(das): remove this before PeerDAS release @@ -401,6 +402,16 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) + .arg( + Arg::new("complete-blob-backfill") + .long("complete-blob-backfill") + .help("Download all blobs back to the Deneb fork epoch. This will likely result in \ + the node banning most of its peers.") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) + .hide(true) + ) .arg( Arg::new("enable-private-discovery") .long("enable-private-discovery") @@ -688,38 +699,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - - /* - * Eth1 Integration - */ - .arg( - Arg::new("eth1-purge-cache") - .long("eth1-purge-cache") - .value_name("PURGE-CACHE") - .help("DEPRECATED") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - .hide(true) - ) - .arg( - Arg::new("eth1-blocks-per-log-query") - .long("eth1-blocks-per-log-query") - .value_name("BLOCKS") - .help("DEPRECATED") - .action(ArgAction::Set) - .display_order(0) - .hide(true) - ) - .arg( - Arg::new("eth1-cache-follow-distance") - .long("eth1-cache-follow-distance") - .value_name("BLOCKS") - .help("DEPRECATED") - .action(ArgAction::Set) - .display_order(0) - .hide(true) - ) .arg( Arg::new("slots-per-restore-point") .long("slots-per-restore-point") @@ -769,7 +748,7 @@ pub fn cli_app() -> Command { .long("block-cache-size") .value_name("SIZE") .help("Specifies how many 
blocks the database should cache in memory") - .default_value("5") + .default_value("0") .action(ArgAction::Set) .display_order(0) ) @@ -1487,16 +1466,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("disable-deposit-contract-sync") - .long("disable-deposit-contract-sync") - .help("DEPRECATED") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .conflicts_with("staking") - .display_order(0) - .hide(true) - ) .arg( Arg::new("disable-optimistic-finalized-sync") .long("disable-optimistic-finalized-sync") @@ -1507,15 +1476,6 @@ pub fn cli_app() -> Command { Lighthouse and only passed to the EL if initial verification fails.") .display_order(0) ) - .arg( - Arg::new("light-client-server") - .long("light-client-server") - .help("DEPRECATED") - .action(ArgAction::SetTrue) - - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("disable-light-client-server") .long("disable-light-client-server") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 1b5f25b317..c2599ec0cd 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -170,13 +170,6 @@ pub fn get_config( parse_required(cli_args, "http-duplicate-block-status")?; } - if cli_args.get_flag("light-client-server") { - warn!( - "The --light-client-server flag is deprecated. 
The light client server is enabled \ - by default" - ); - } - if cli_args.get_flag("disable-light-client-server") { client_config.chain.enable_light_client_server = false; } @@ -262,24 +255,6 @@ pub fn get_config( client_config.http_metrics.allocator_metrics_enabled = false; } - /* - * Deprecated Eth1 flags (can be removed in the next minor release after v7.1.0) - */ - if cli_args - .get_one::("eth1-blocks-per-log-query") - .is_some() - { - warn!("The eth1-blocks-per-log-query flag is deprecated"); - } - - if cli_args.get_flag("eth1-purge-cache") { - warn!("The eth1-purge-cache flag is deprecated"); - } - - if clap_utils::parse_optional::(cli_args, "eth1-cache-follow-distance")?.is_some() { - warn!("The eth1-cache-follow-distance flag is deprecated"); - } - // `--execution-endpoint` is required now. let endpoints: String = clap_utils::parse_required(cli_args, "execution-endpoint")?; let mut el_config = execution_layer::Config::default(); @@ -773,10 +748,6 @@ pub fn get_config( } } - if cli_args.get_flag("disable-deposit-contract-sync") { - warn!("The disable-deposit-contract-sync flag is deprecated"); - } - client_config.chain.prepare_payload_lookahead = clap_utils::parse_optional(cli_args, "prepare-payload-lookahead")? .map(Duration::from_millis) @@ -825,6 +796,14 @@ pub fn get_config( client_config.chain.genesis_backfill = true; } + client_config.chain.complete_blob_backfill = cli_args.get_flag("complete-blob-backfill"); + + // Ensure `prune_blobs` is false whenever complete-blob-backfill is set. This overrides any + // setting of `--prune-blobs true` applied earlier in flag parsing. 
+ if client_config.chain.complete_blob_backfill { + client_config.store.prune_blobs = false; + } + // Backfill sync rate-limiting client_config.beacon_processor.enable_backfill_rate_limiting = !cli_args.get_flag("disable-backfill-rate-limiting"); @@ -1154,7 +1133,7 @@ pub fn set_network_config( config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; - if parse_flag(cli_args, "subscribe-all-data-column-subnets") { + if parse_flag(cli_args, "supernode") { config.subscribe_all_data_column_subnets = true; } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index ad81fa6076..c0f15f2417 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -19,7 +19,7 @@ pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::LevelDb; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_EPOCHS_PER_STATE_DIFF: u64 = 8; -pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(64); +pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 0; pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); pub const DEFAULT_STATE_CACHE_HEADROOM: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_COMPRESSION_LEVEL: i32 = 1; @@ -34,7 +34,7 @@ pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct StoreConfig { /// Maximum number of blocks to store in the in-memory block cache. - pub block_cache_size: NonZeroUsize, + pub block_cache_size: usize, /// Maximum number of states to store in the in-memory state cache. pub state_cache_size: NonZeroUsize, /// Minimum number of states to cull from the state cache upon fullness. 
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 7156c75f11..0d8a65e064 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -70,7 +70,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// The hot database also contains all blocks. pub hot_db: Hot, /// LRU cache of deserialized blocks and blobs. Updated whenever a block or blob is loaded. - block_cache: Mutex>, + block_cache: Option>>, /// Cache of beacon states. /// /// LOCK ORDERING: this lock must always be locked *after* the `split` if both are required. @@ -229,7 +229,9 @@ impl HotColdDB, MemoryStore> { cold_db: MemoryStore::open(), blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), - block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), + block_cache: NonZeroUsize::new(config.block_cache_size) + .map(BlockCache::new) + .map(Mutex::new), state_cache: Mutex::new(StateCache::new( config.state_cache_size, config.state_cache_headroom, @@ -281,7 +283,9 @@ impl HotColdDB, BeaconNodeBackend> { blobs_db: BeaconNodeBackend::open(&config, blobs_db_path)?, cold_db: BeaconNodeBackend::open(&config, cold_path)?, hot_db, - block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), + block_cache: NonZeroUsize::new(config.block_cache_size) + .map(BlockCache::new) + .map(Mutex::new), state_cache: Mutex::new(StateCache::new( config.state_cache_size, config.state_cache_headroom, @@ -488,14 +492,17 @@ impl, Cold: ItemStore> HotColdDB pub fn register_metrics(&self) { let hsc_metrics = self.historic_state_cache.lock().metrics(); - metrics::set_gauge( - &metrics::STORE_BEACON_BLOCK_CACHE_SIZE, - self.block_cache.lock().block_cache.len() as i64, - ); - metrics::set_gauge( - &metrics::STORE_BEACON_BLOB_CACHE_SIZE, - self.block_cache.lock().blob_cache.len() as i64, - ); + if let Some(block_cache) = &self.block_cache { + let cache = block_cache.lock(); + metrics::set_gauge( + 
&metrics::STORE_BEACON_BLOCK_CACHE_SIZE, + cache.block_cache.len() as i64, + ); + metrics::set_gauge( + &metrics::STORE_BEACON_BLOB_CACHE_SIZE, + cache.blob_cache.len() as i64, + ); + } let state_cache = self.state_cache.lock(); metrics::set_gauge( &metrics::STORE_BEACON_STATE_CACHE_SIZE, @@ -553,7 +560,9 @@ impl, Cold: ItemStore> HotColdDB let block = self.block_as_kv_store_ops(block_root, block, &mut ops)?; self.hot_db.do_atomically(ops)?; // Update cache. - self.block_cache.lock().put_block(*block_root, block); + self.block_cache + .as_ref() + .inspect(|cache| cache.lock().put_block(*block_root, block)); Ok(()) } @@ -605,7 +614,9 @@ impl, Cold: ItemStore> HotColdDB metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT); // Check the cache. - if let Some(block) = self.block_cache.lock().get_block(block_root) { + if let Some(cache) = &self.block_cache + && let Some(block) = cache.lock().get_block(block_root) + { metrics::inc_counter(&metrics::BEACON_BLOCK_CACHE_HIT_COUNT); return Ok(Some(DatabaseBlock::Full(block.clone()))); } @@ -630,8 +641,8 @@ impl, Cold: ItemStore> HotColdDB // Add to cache. self.block_cache - .lock() - .put_block(*block_root, full_block.clone()); + .as_ref() + .inspect(|cache| cache.lock().put_block(*block_root, full_block.clone())); DatabaseBlock::Full(full_block) } else if !self.config.prune_payloads { @@ -656,6 +667,7 @@ impl, Cold: ItemStore> HotColdDB } /// Fetch a full block with execution payload from the store. + #[instrument(skip_all)] pub fn get_full_block( &self, block_root: &Hash256, @@ -901,7 +913,9 @@ impl, Cold: ItemStore> HotColdDB /// Delete a block from the store and the block cache. 
pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { - self.block_cache.lock().delete(block_root); + self.block_cache + .as_ref() + .inspect(|cache| cache.lock().delete(block_root)); self.hot_db .key_delete(DBColumn::BeaconBlock, block_root.as_slice())?; self.hot_db @@ -916,7 +930,9 @@ impl, Cold: ItemStore> HotColdDB block_root.as_slice(), &blobs.as_ssz_bytes(), )?; - self.block_cache.lock().put_blobs(*block_root, blobs); + self.block_cache + .as_ref() + .inspect(|cache| cache.lock().put_blobs(*block_root, blobs)); Ok(()) } @@ -944,9 +960,11 @@ impl, Cold: ItemStore> HotColdDB self.blobs_db .put(&DATA_COLUMN_CUSTODY_INFO_KEY, &data_column_custody_info)?; - self.block_cache - .lock() - .put_data_column_custody_info(Some(data_column_custody_info)); + self.block_cache.as_ref().inspect(|cache| { + cache + .lock() + .put_data_column_custody_info(Some(data_column_custody_info)) + }); Ok(()) } @@ -963,8 +981,8 @@ impl, Cold: ItemStore> HotColdDB &data_column.as_ssz_bytes(), )?; self.block_cache - .lock() - .put_data_column(*block_root, data_column); + .as_ref() + .inspect(|cache| cache.lock().put_data_column(*block_root, data_column)); } Ok(()) } @@ -1398,7 +1416,7 @@ impl, Cold: ItemStore> HotColdDB // Update database whilst holding a lock on cache, to ensure that the cache updates // atomically with the database. - let mut guard = self.block_cache.lock(); + let guard = self.block_cache.as_ref().map(|cache| cache.lock()); let blob_cache_ops = blobs_ops.clone(); // Try to execute blobs store ops. @@ -1445,56 +1463,67 @@ impl, Cold: ItemStore> HotColdDB return Err(e); } - for op in hot_db_cache_ops { + // Delete from the state cache. 
+ for op in &hot_db_cache_ops { match op { - StoreOp::PutBlock(block_root, block) => { - guard.put_block(block_root, (*block).clone()); - } - - StoreOp::PutBlobs(_, _) => (), - - StoreOp::PutDataColumns(_, _) => (), - - StoreOp::PutState(_, _) => (), - - StoreOp::PutStateSummary(_, _) => (), - StoreOp::DeleteBlock(block_root) => { - guard.delete_block(&block_root); - self.state_cache.lock().delete_block_states(&block_root); + self.state_cache.lock().delete_block_states(block_root); } - StoreOp::DeleteState(state_root, _) => { - self.state_cache.lock().delete_state(&state_root) + self.state_cache.lock().delete_state(state_root) } - - StoreOp::DeleteBlobs(_) => (), - - StoreOp::DeleteDataColumns(_, _) => (), - - StoreOp::DeleteExecutionPayload(_) => (), - - StoreOp::DeleteSyncCommitteeBranch(_) => (), - - StoreOp::KeyValueOp(_) => (), - } - } - - for op in blob_cache_ops { - match op { - StoreOp::PutBlobs(block_root, blobs) => { - guard.put_blobs(block_root, blobs); - } - - StoreOp::DeleteBlobs(block_root) => { - guard.delete_blobs(&block_root); - } - _ => (), } } - drop(guard); + // If the block cache is enabled, also delete from the block cache. 
+ if let Some(mut guard) = guard { + for op in hot_db_cache_ops { + match op { + StoreOp::PutBlock(block_root, block) => { + guard.put_block(block_root, (*block).clone()); + } + + StoreOp::PutBlobs(_, _) => (), + + StoreOp::PutDataColumns(_, _) => (), + + StoreOp::PutState(_, _) => (), + + StoreOp::PutStateSummary(_, _) => (), + + StoreOp::DeleteBlock(block_root) => { + guard.delete_block(&block_root); + } + + StoreOp::DeleteState(_, _) => (), + + StoreOp::DeleteBlobs(_) => (), + + StoreOp::DeleteDataColumns(_, _) => (), + + StoreOp::DeleteExecutionPayload(_) => (), + + StoreOp::DeleteSyncCommitteeBranch(_) => (), + + StoreOp::KeyValueOp(_) => (), + } + } + + for op in blob_cache_ops { + match op { + StoreOp::PutBlobs(block_root, blobs) => { + guard.put_blobs(block_root, blobs); + } + + StoreOp::DeleteBlobs(block_root) => { + guard.delete_blobs(&block_root); + } + + _ => (), + } + } + } Ok(()) } @@ -2424,21 +2453,23 @@ impl, Cold: ItemStore> HotColdDB /// If custody info doesn't exist in the cache, /// try to fetch from the DB and prime the cache. 
pub fn get_data_column_custody_info(&self) -> Result, Error> { - let Some(data_column_custody_info) = self.block_cache.lock().get_data_column_custody_info() - else { - let data_column_custody_info = self - .blobs_db - .get::(&DATA_COLUMN_CUSTODY_INFO_KEY)?; + if let Some(cache) = &self.block_cache + && let Some(data_column_custody_info) = cache.lock().get_data_column_custody_info() + { + return Ok(Some(data_column_custody_info)); + } + let data_column_custody_info = self + .blobs_db + .get::(&DATA_COLUMN_CUSTODY_INFO_KEY)?; - // Update the cache - self.block_cache + // Update the cache + self.block_cache.as_ref().inspect(|cache| { + cache .lock() - .put_data_column_custody_info(data_column_custody_info.clone()); + .put_data_column_custody_info(data_column_custody_info.clone()) + }); - return Ok(data_column_custody_info); - }; - - Ok(Some(data_column_custody_info)) + Ok(data_column_custody_info) } /// Fetch all columns for a given block from the store. @@ -2459,9 +2490,13 @@ impl, Cold: ItemStore> HotColdDB /// Fetch blobs for a given block from the store. pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { // Check the cache. - if let Some(blobs) = self.block_cache.lock().get_blobs(block_root) { + if let Some(blobs) = self + .block_cache + .as_ref() + .and_then(|cache| cache.lock().get_blobs(block_root).cloned()) + { metrics::inc_counter(&metrics::BEACON_BLOBS_CACHE_HIT_COUNT); - return Ok(blobs.clone().into()); + return Ok(blobs.into()); } match self @@ -2480,8 +2515,8 @@ impl, Cold: ItemStore> HotColdDB { let blobs = BlobSidecarList::new(blobs, max_blobs_per_block as usize)?; self.block_cache - .lock() - .put_blobs(*block_root, blobs.clone()); + .as_ref() + .inspect(|cache| cache.lock().put_blobs(*block_root, blobs.clone())); Ok(BlobSidecarListFromRoot::Blobs(blobs)) } else { @@ -2514,8 +2549,8 @@ impl, Cold: ItemStore> HotColdDB // Check the cache. 
if let Some(data_column) = self .block_cache - .lock() - .get_data_column(block_root, column_index) + .as_ref() + .and_then(|cache| cache.lock().get_data_column(block_root, column_index)) { metrics::inc_counter(&metrics::BEACON_DATA_COLUMNS_CACHE_HIT_COUNT); return Ok(Some(data_column)); @@ -2527,9 +2562,11 @@ impl, Cold: ItemStore> HotColdDB )? { Some(ref data_column_bytes) => { let data_column = Arc::new(DataColumnSidecar::from_ssz_bytes(data_column_bytes)?); - self.block_cache - .lock() - .put_data_column(*block_root, data_column.clone()); + self.block_cache.as_ref().inspect(|cache| { + cache + .lock() + .put_data_column(*block_root, data_column.clone()) + }); Ok(Some(data_column)) } None => Ok(None), @@ -3263,11 +3300,11 @@ impl, Cold: ItemStore> HotColdDB } // Remove deleted blobs from the cache. - let mut block_cache = self.block_cache.lock(); - for block_root in removed_block_roots { - block_cache.delete_blobs(&block_root); + if let Some(mut block_cache) = self.block_cache.as_ref().map(|cache| cache.lock()) { + for block_root in removed_block_roots { + block_cache.delete_blobs(&block_root); + } } - drop(block_cache); let new_blob_info = BlobInfo { oldest_blob_slot: Some(end_slot + 1), diff --git a/book/src/advanced_database_migrations.md b/book/src/advanced_database_migrations.md index e29397619c..3552a90b0e 100644 --- a/book/src/advanced_database_migrations.md +++ b/book/src/advanced_database_migrations.md @@ -17,6 +17,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| +| v8.0.0-rc.0 | Sep 2025 | v28 | yes before Fulu | | v7.1.0 | Jul 2025 | v26 | yes | | v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | @@ -207,6 +208,7 @@ Here are the steps to prune historic states: | Lighthouse version | Release date | Schema version | Downgrade available? 
| |--------------------|--------------|----------------|-------------------------------------| +| v8.0.0-rc.0 | Sep 2025 | v28 | yes before Fulu | | v7.1.0 | Jul 2025 | v26 | yes | | v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | diff --git a/book/src/help_bn.md b/book/src/help_bn.md index ea02b39bee..6680202a27 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -22,7 +22,7 @@ Options: Data directory for the blobs database. --block-cache-size Specifies how many blocks the database should cache in memory - [default: 5] + [default: 0] --boot-nodes One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported. @@ -513,8 +513,6 @@ Flags: subscriptions. This will only import attestations from already-subscribed subnets, use with --subscribe-all-subnets to ensure all attestations are received for import. - --light-client-server - DEPRECATED --log-color [] Enables/Disables colors for logs in terminal. Set it to false to disable colors. [default: true] [possible values: true, false] @@ -571,6 +569,13 @@ Flags: Subscribe to all subnets regardless of validator count. This will also advertise the beacon node as being long-lived subscribed to all subnets. + --supernode + Run as a voluntary supernode. This node will subscribe to all data + column subnets, custody all data columns, and perform reconstruction + and cross-seeding. This requires significantly more bandwidth, + storage, and computation requirements but the node will have direct + access to all blobs via the beacon API and it helps network resilience + by serving all data columns to syncing peers. --validator-monitor-auto Enables the automatic detection and monitoring of validators connected to the HTTP API and using the subnet subscription endpoint. 
This diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index d431dbeadf..aedd57dd4b 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "7.1.0" +version = "8.0.0-rc.1" authors = ["Sigma Prime "] edition = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 3368569d59..0423794d0d 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1336,7 +1336,7 @@ impl BeaconNodeHttpClient { } /// Path for `v1/beacon/blob_sidecars/{block_id}` - pub fn get_blobs_path(&self, block_id: BlockId) -> Result { + pub fn get_blob_sidecars_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1346,6 +1346,17 @@ impl BeaconNodeHttpClient { Ok(path) } + /// Path for `v1/beacon/blobs/{blob_id}` + pub fn get_blobs_path(&self, block_id: BlockId) -> Result { + let mut path = self.eth_path(V1)?; + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blobs") + .push(&block_id.to_string()); + Ok(path) + } + /// Path for `v1/beacon/blinded_blocks/{block_id}` pub fn get_beacon_blinded_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V1)?; @@ -1374,13 +1385,13 @@ impl BeaconNodeHttpClient { /// `GET v1/beacon/blob_sidecars/{block_id}` /// /// Returns `Ok(None)` on a 404 error. - pub async fn get_blobs( + pub async fn get_blob_sidecars( &self, block_id: BlockId, indices: Option<&[u64]>, spec: &ChainSpec, ) -> Result>>, Error> { - let mut path = self.get_blobs_path(block_id)?; + let mut path = self.get_blob_sidecars_path(block_id)?; if let Some(indices) = indices { let indices_string = indices .iter() @@ -1400,6 +1411,31 @@ impl BeaconNodeHttpClient { .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } + /// `GET v1/beacon/blobs/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. 
+ pub async fn get_blobs( + &self, + block_id: BlockId, + versioned_hashes: Option<&[Hash256]>, + ) -> Result>>>, Error> + { + let mut path = self.get_blobs_path(block_id)?; + if let Some(hashes) = versioned_hashes { + let hashes_string = hashes + .iter() + .map(|hash| hash.to_string()) + .collect::>() + .join(","); + path.query_pairs_mut() + .append_pair("versioned_hashes", &hashes_string); + } + + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::Unversioned)) + } + /// `GET v1/beacon/blinded_blocks/{block_id}` /// /// Returns `Ok(None)` on a 404 error. diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index b72ab29380..8f553b57d9 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -716,6 +716,13 @@ pub struct BlobIndicesQuery { pub indices: Option>, } +#[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct BlobsVersionedHashesQuery { + #[serde(default, deserialize_with = "option_query_vec")] + pub versioned_hashes: Option>, +} + #[derive(Clone, Deserialize)] #[serde(deny_unknown_fields)] pub struct DataColumnIndicesQuery { @@ -2317,6 +2324,14 @@ pub struct StandardAttestationRewards { pub total_rewards: Vec, } +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode)] +#[serde(bound = "E: EthSpec")] +#[serde(transparent)] +pub struct BlobWrapper { + #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] + pub blob: Blob, +} + #[cfg(test)] mod test { use std::fmt::Debug; diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index ab5f0f3bde..b1e9faea1d 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -38,7 +38,7 @@ ELECTRA_FORK_VERSION: 0x06017000 ELECTRA_FORK_EPOCH: 115968 # Fulu FULU_FORK_VERSION: 0x07017000 -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 
165120 # Gloas GLOAS_FORK_VERSION: 0x08017000 GLOAS_FORK_EPOCH: 18446744073709551615 @@ -47,6 +47,8 @@ GLOAS_FORK_EPOCH: 18446744073709551615 # --------------------------------------------------------------- # 12 seconds SECONDS_PER_SLOT: 12 +# 12000 milliseconds +SLOT_DURATION_MS: 12000 # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 14 # 2**8 (= 256) epochs ~27 hours @@ -55,6 +57,18 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 SHARD_COMMITTEE_PERIOD: 256 # 2**11 (= 2,048) Eth1 blocks ~8 hours ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle # --------------------------------------------------------------- @@ -141,13 +155,30 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 9 MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 # Fulu +# 2**7 (= 128) groups NUMBER_OF_CUSTODY_GROUPS: 128 +# 2**7 (= 128) subnets DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS (= 128 * 128) sidecars MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +# 2**3 (= 8) samples SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + - EPOCH: 166400 + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 167936 + MAX_BLOBS_PER_BLOCK: 21 + # Gloas \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml
b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml index 01322974c8..256957e119 100644 --- a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml @@ -42,7 +42,7 @@ ELECTRA_FORK_EPOCH: 2048 # Fulu FULU_FORK_VERSION: 0x70000910 -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 50688 # Gloas GLOAS_FORK_VERSION: 0x80000910 @@ -53,6 +53,8 @@ GLOAS_FORK_EPOCH: 18446744073709551615 # --------------------------------------------------------------- # 12 seconds SECONDS_PER_SLOT: 12 +# 12000 milliseconds +SLOT_DURATION_MS: 12000 # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 12 # 2**8 (= 256) epochs ~27 hours @@ -61,6 +63,18 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 SHARD_COMMITTEE_PERIOD: 256 # 2**11 (= 2,048) Eth1 blocks ~8 hours ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle # --------------------------------------------------------------- @@ -154,15 +168,33 @@ WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 WHISK_PROPOSER_SELECTION_GAP: 2 # Fulu +# 2**7 (= 128) groups NUMBER_OF_CUSTODY_GROUPS: 128 +# 2**7 (= 128) subnets DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS (= 128 * 128) sidecars MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +# 2**3 (= 8) samples SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 
4096 + +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + - EPOCH: 52480 + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 54016 + MAX_BLOBS_PER_BLOCK: 21 + # Gloas # EIP7732 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 9802e409fb..b1a01933d7 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -42,7 +42,7 @@ ELECTRA_FORK_EPOCH: 222464 # Fulu FULU_FORK_VERSION: 0x90000075 -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 272640 # Gloas GLOAS_FORK_VERSION: 0x90000076 @@ -52,6 +52,8 @@ GLOAS_FORK_EPOCH: 18446744073709551615 # --------------------------------------------------------------- # 12 seconds SECONDS_PER_SLOT: 12 +# 12000 milliseconds +SLOT_DURATION_MS: 12000 # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 14 # 2**8 (= 256) epochs ~27 hours @@ -60,6 +62,18 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 SHARD_COMMITTEE_PERIOD: 256 # 2**11 (= 2,048) Eth1 blocks ~8 hours ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle @@ -147,13 +161,31 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 9 MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 # Fulu +# 2**7 (= 128) groups NUMBER_OF_CUSTODY_GROUPS: 128 +# 2**7 (= 128) subnets DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS (= 128 * 128) sidecars MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +# 2**3 (= 8) samples SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) 
sidecars CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + - EPOCH: 274176 + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 275712 + MAX_BLOBS_PER_BLOCK: 21 + # Gloas \ No newline at end of file diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 12de21239a..a674a81557 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -244,7 +244,7 @@ impl Eth2NetworkConfig { } } - fn get_genesis_state_from_bytes(&self) -> Result, String> { + pub fn get_genesis_state_from_bytes(&self) -> Result, String> { let spec = self.chain_spec::()?; self.genesis_state_bytes .as_ref() diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index c45dbac4d3..a3f0ca404f 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.1.0-", - fallback = "Lighthouse/v7.1.0" + prefix = "Lighthouse/v8.0.0-rc.1-", + fallback = "Lighthouse/v8.0.0-rc.1" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.1.0" + "8.0.0-rc.1" } /// Returns the name of the current client running. 
diff --git a/common/logging/src/tracing_logging_layer.rs b/common/logging/src/tracing_logging_layer.rs index 923ac1758f..e631d272b7 100644 --- a/common/logging/src/tracing_logging_layer.rs +++ b/common/logging/src/tracing_logging_layer.rs @@ -1,4 +1,5 @@ use crate::utils::is_ascii_control; +use std::collections::HashSet; use chrono::prelude::*; use serde_json::{Map, Value}; @@ -261,6 +262,12 @@ fn build_log_json( let module_field = format!("{}:{}", module_path, line_number); log_map.insert("module".to_string(), Value::String(module_field)); + // Avoid adding duplicate fields; prefer event fields when duplicates exist. + for (key, val) in span_fields { + let parsed_span_val = parse_field(val); + log_map.insert(key.clone(), parsed_span_val); + } + for (key, val) in visitor.fields.clone().into_iter() { let cleaned_value = if val.starts_with('\"') && val.ends_with('\"') && val.len() >= 2 { &val[1..val.len() - 1] @@ -272,11 +279,6 @@ fn build_log_json( log_map.insert(key, parsed_val); } - for (key, val) in span_fields { - let parsed_span_val = parse_field(val); - log_map.insert(key.clone(), parsed_span_val); - } - let json_obj = Value::Object(log_map); let output = format!("{}\n", json_obj); @@ -299,23 +301,6 @@ fn build_log_text( let bold_start = "\x1b[1m"; let bold_end = "\x1b[0m"; - let mut formatted_spans = String::new(); - for (i, (field_name, field_value)) in span_fields.iter().rev().enumerate() { - if use_color { - formatted_spans.push_str(&format!( - "{}{}{}: {}", - bold_start, field_name, bold_end, field_value - )); - } else { - formatted_spans.push_str(&format!("{}: {}", field_name, field_value)); - } - - // Check if this is not the last span. 
- if i != span_fields.len() - 1 { - formatted_spans.push_str(", "); - } - } - let pad = if plain_level_str.len() < ALIGNED_LEVEL_WIDTH { " " } else { @@ -351,24 +336,26 @@ fn build_log_text( message_content.clone() }; - let mut formatted_fields = String::new(); - for (i, (field_name, field_value)) in visitor.fields.iter().enumerate() { - if i > 0 { - formatted_fields.push_str(", "); - } - if use_color { - formatted_fields.push_str(&format!( - "{}{}{}: {}", - bold_start, field_name, bold_end, field_value - )); - } else { - formatted_fields.push_str(&format!("{}: {}", field_name, field_value)); - } - // Check if this is the last field and that we are also adding spans. - if i == visitor.fields.len() - 1 && !span_fields.is_empty() { - formatted_fields.push(','); - } - } + // Avoid adding duplicate fields; prefer event fields when duplicates exist. + let mut added_field_names = HashSet::new(); + let formatted_fields = visitor + .fields + .iter() + .chain(span_fields.iter()) + .filter_map(|(field_name, field_value)| { + if added_field_names.insert(field_name) { + let formatted_field = if use_color { + format!("{}{}{}: {}", bold_start, field_name, bold_end, field_value) + } else { + format!("{}: {}", field_name, field_value) + }; + Some(formatted_field) + } else { + None + } + }) + .collect::>() + .join(", "); let full_message = if !formatted_fields.is_empty() { format!("{} {}", padded_message, formatted_fields) @@ -378,14 +365,11 @@ fn build_log_text( let message = if !location.is_empty() { format!( - "{} {} {} {} {}\n", - timestamp, level_str, location, full_message, formatted_spans + "{} {} {} {}\n", + timestamp, level_str, location, full_message ) } else { - format!( - "{} {} {} {}\n", - timestamp, level_str, full_message, formatted_spans - ) + format!("{} {} {}\n", timestamp, level_str, full_message) }; if let Err(e) = writer.write_all(message.as_bytes()) { @@ -436,7 +420,7 @@ mod tests { fn test_build_log_text_single_log_field() { let log_fields = 
vec![("field_name".to_string(), "field_value".to_string())]; let span_fields = vec![]; - let expected = "Jan 1 08:00:00.000 INFO test message field_name: field_value \n"; + let expected = "Jan 1 08:00:00.000 INFO test message field_name: field_value\n"; test_build_log_text(log_fields, span_fields, expected); } @@ -447,7 +431,7 @@ mod tests { ("field_name2".to_string(), "field_value2".to_string()), ]; let span_fields = vec![]; - let expected = "Jan 1 08:00:00.000 INFO test message field_name1: field_value1, field_name2: field_value2 \n"; + let expected = "Jan 1 08:00:00.000 INFO test message field_name1: field_value1, field_name2: field_value2\n"; test_build_log_text(log_fields, span_fields, expected); } @@ -469,7 +453,7 @@ mod tests { "span_field_name".to_string(), "span_field_value".to_string(), )]; - let expected = "Jan 1 08:00:00.000 INFO test message span_field_name: span_field_value\n"; + let expected = "Jan 1 08:00:00.000 INFO test message span_field_name: span_field_value\n"; test_build_log_text(log_fields, span_fields, expected); } @@ -486,7 +470,7 @@ mod tests { "span_field_value2".to_string(), ), ]; - let expected = "Jan 1 08:00:00.000 INFO test message span_field_name2: span_field_value2, span_field_name1: span_field_value1\n"; + let expected = "Jan 1 08:00:00.000 INFO test message span_field_name1: span_field_value1, span_field_name2: span_field_value2\n"; test_build_log_text(log_fields, span_fields, expected); } @@ -503,7 +487,35 @@ mod tests { "span_field_value1-2".to_string(), ), ]; - let expected = "Jan 1 08:00:00.000 INFO test message span_field_name1-2: span_field_value1-2, span_field_name1-1: span_field_value1-1\n"; + let expected = "Jan 1 08:00:00.000 INFO test message span_field_name1-1: span_field_value1-1, span_field_name1-2: span_field_value1-2\n"; + test_build_log_text(log_fields, span_fields, expected); + } + + #[test] + fn test_build_log_text_no_duplicate_log_span_fields() { + let log_fields = vec![ + ("field_name_1".to_string(), 
"field_value_1".to_string()), + ("field_name_2".to_string(), "field_value_2".to_string()), + ]; + let span_fields = vec![ + ("field_name_1".to_string(), "field_value_1".to_string()), + ("field_name_3".to_string(), "field_value_3".to_string()), + ]; + let expected = "Jan 1 08:00:00.000 INFO test message field_name_1: field_value_1, field_name_2: field_value_2, field_name_3: field_value_3\n"; + test_build_log_text(log_fields, span_fields, expected); + } + + #[test] + fn test_build_log_text_duplicate_fields_prefer_log_fields() { + let log_fields = vec![ + ("field_name_1".to_string(), "field_value_1_log".to_string()), + ("field_name_2".to_string(), "field_value_2".to_string()), + ]; + let span_fields = vec![ + ("field_name_1".to_string(), "field_value_1_span".to_string()), + ("field_name_3".to_string(), "field_value_3".to_string()), + ]; + let expected = "Jan 1 08:00:00.000 INFO test message field_name_1: field_value_1_log, field_name_2: field_value_2, field_name_3: field_value_3\n"; test_build_log_text(log_fields, span_fields, expected); } diff --git a/common/network_utils/src/discovery_metrics.rs b/common/network_utils/src/discovery_metrics.rs index d105dee57a..26a9e8a45f 100644 --- a/common/network_utils/src/discovery_metrics.rs +++ b/common/network_utils/src/discovery_metrics.rs @@ -35,8 +35,7 @@ pub static DISCOVERY_SESSIONS: LazyLock> = LazyLock::new(|| { }); pub fn scrape_discovery_metrics() { - let metrics = - discv5::metrics::Metrics::from(discv5::Discv5::::raw_metrics()); + let metrics = discv5::metrics::Metrics::from(discv5::Discv5::raw_metrics()); set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second); set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64); set_gauge_vec(&DISCOVERY_BYTES, &["inbound"], metrics.bytes_recv as i64); diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index d4faf1e4b8..92a4fc4b59 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -8,6 +8,8 
@@ edition = { workspace = true } async-channel = { workspace = true } futures = { workspace = true } metrics = { workspace = true } +num_cpus = { workspace = true } +rayon = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } tracing = { workspace = true } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 5f0c822b03..0b8e9f8eba 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -1,12 +1,15 @@ mod metrics; +mod rayon_pool_provider; pub mod test_utils; use futures::channel::mpsc::Sender; use futures::prelude::*; -use std::sync::Weak; +use std::sync::{Arc, Weak}; use tokio::runtime::{Handle, Runtime}; use tracing::debug; +use crate::rayon_pool_provider::RayonPoolProvider; +pub use crate::rayon_pool_provider::RayonPoolType; pub use tokio::task::JoinHandle; /// Provides a reason when Lighthouse is shut down. @@ -84,6 +87,8 @@ pub struct TaskExecutor { // FIXME(sproul): delete? #[allow(dead_code)] service_name: String, + + rayon_pool_provider: Arc, } impl TaskExecutor { @@ -105,6 +110,7 @@ impl TaskExecutor { exit, signal_tx, service_name, + rayon_pool_provider: Arc::new(RayonPoolProvider::default()), } } @@ -115,6 +121,7 @@ impl TaskExecutor { exit: self.exit.clone(), signal_tx: self.signal_tx.clone(), service_name, + rayon_pool_provider: self.rayon_pool_provider.clone(), } } @@ -226,6 +233,47 @@ impl TaskExecutor { } } + /// Spawns a blocking task on a dedicated tokio thread pool and installs a rayon context within it. + pub fn spawn_blocking_with_rayon( + self, + task: F, + rayon_pool_type: RayonPoolType, + name: &'static str, + ) where + F: FnOnce() + Send + 'static, + { + let thread_pool = self.rayon_pool_provider.get_thread_pool(rayon_pool_type); + self.spawn_blocking( + move || { + thread_pool.install(|| { + task(); + }); + }, + name, + ) + } + + /// Spawns a blocking computation on a rayon thread pool and awaits the result. 
+ pub async fn spawn_blocking_with_rayon_async( + &self, + rayon_pool_type: RayonPoolType, + task: F, + ) -> Result + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let thread_pool = self.rayon_pool_provider.get_thread_pool(rayon_pool_type); + let (tx, rx) = tokio::sync::oneshot::channel(); + + thread_pool.spawn(move || { + let result = task(); + let _ = tx.send(result); + }); + + rx.await + } + /// Spawn a future on the tokio runtime wrapped in an `async-channel::Receiver` returning an optional /// join handle to the future. /// The task is cancelled when the corresponding async-channel is dropped. diff --git a/common/task_executor/src/rayon_pool_provider.rs b/common/task_executor/src/rayon_pool_provider.rs new file mode 100644 index 0000000000..8e12f7eaa4 --- /dev/null +++ b/common/task_executor/src/rayon_pool_provider.rs @@ -0,0 +1,58 @@ +use rayon::{ThreadPool, ThreadPoolBuilder}; +use std::sync::Arc; + +const DEFAULT_LOW_PRIORITY_CPU_PERCENTAGE: usize = 25; +const DEFAULT_HIGH_PRIORITY_CPU_PERCENTAGE: usize = 80; +const MINIMUM_THREAD_COUNT: usize = 1; + +pub enum RayonPoolType { + HighPriority, + LowPriority, +} + +pub struct RayonPoolProvider { + /// Smaller rayon thread pool for lower-priority, compute-intensive tasks. + /// By default ~25% of CPUs or a minimum of 1 thread. + low_priority_thread_pool: Arc, + /// Larger rayon thread pool for high-priority, compute-intensive tasks. + /// By default ~80% of CPUs or a minimum of 1 thread. Citical/highest + /// priority tasks should use the global pool instead. 
+ high_priority_thread_pool: Arc, +} + +impl Default for RayonPoolProvider { + fn default() -> Self { + let low_prio_threads = + (num_cpus::get() * DEFAULT_LOW_PRIORITY_CPU_PERCENTAGE / 100).max(MINIMUM_THREAD_COUNT); + let low_priority_thread_pool = Arc::new( + ThreadPoolBuilder::new() + .num_threads(low_prio_threads) + .build() + .expect("failed to build low-priority rayon pool"), + ); + + let high_prio_threads = (num_cpus::get() * DEFAULT_HIGH_PRIORITY_CPU_PERCENTAGE / 100) + .max(MINIMUM_THREAD_COUNT); + let high_priority_thread_pool = Arc::new( + ThreadPoolBuilder::new() + .num_threads(high_prio_threads) + .build() + .expect("failed to build high-priority rayon pool"), + ); + Self { + low_priority_thread_pool, + high_priority_thread_pool, + } + } +} + +impl RayonPoolProvider { + /// Get a scoped thread pool by priority level. + /// For critical/highest priority tasks, use the global pool instead. + pub fn get_thread_pool(&self, rayon_pool_type: RayonPoolType) -> Arc { + match rayon_pool_type { + RayonPoolType::HighPriority => self.high_priority_thread_pool.clone(), + RayonPoolType::LowPriority => self.low_priority_thread_pool.clone(), + } + } +} diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 4b31dc60bd..dea853d245 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -160,6 +160,56 @@ pub struct Block { pub unrealized_finalized_checkpoint: Option, } +impl Block { + /// Compute the proposer shuffling decision root of a child block in `child_block_epoch`. + /// + /// This function assumes that `child_block_epoch >= self.epoch`. It is the responsibility of + /// the caller to check this condition, or else incorrect results will be produced. 
+ pub fn proposer_shuffling_root_for_child_block( + &self, + child_block_epoch: Epoch, + spec: &ChainSpec, + ) -> Hash256 { + let block_epoch = self.current_epoch_shuffling_id.shuffling_epoch; + + // For child blocks in the Fulu fork epoch itself, we want to use the old logic. There is no + // lookahead in the first Fulu epoch. So we check whether Fulu is enabled at + // `child_block_epoch - 1`, i.e. whether `child_block_epoch > fulu_fork_epoch`. + if !spec + .fork_name_at_epoch(child_block_epoch.saturating_sub(1_u64)) + .fulu_enabled() + { + // Prior to Fulu the proposer shuffling decision root for the current epoch is the same + // as the attestation shuffling for the *next* epoch, i.e. it is determined at the start + // of the current epoch. + if block_epoch == child_block_epoch { + self.next_epoch_shuffling_id.shuffling_decision_block + } else { + // Otherwise, the child block epoch is greater, so its decision root is its parent + // root itself (this block's root). + self.root + } + } else { + // After Fulu the proposer shuffling is determined with lookahead, so if the block + // lies in the same epoch as its parent, its decision root is the same as the + // parent's current epoch attester shuffling + // + // i.e. the block from the end of epoch N - 2. + if child_block_epoch == block_epoch { + self.current_epoch_shuffling_id.shuffling_decision_block + } else if child_block_epoch == block_epoch + 1 { + // If the block is the next epoch, then it instead shares its decision root with + // the parent's *next epoch* attester shuffling. + self.next_epoch_shuffling_id.shuffling_decision_block + } else { + // The child block lies in the future beyond the lookahead, at the point where this + // block (its parent) will be the decision block. + self.root + } + } + } +} + /// A Vec-wrapper which will grow to match any request. 
/// /// E.g., a `get` or `insert` to an out-of-bounds element will cause the Vec to grow (using diff --git a/consensus/safe_arith/Cargo.toml b/consensus/safe_arith/Cargo.toml deleted file mode 100644 index 9ac9fe28d3..0000000000 --- a/consensus/safe_arith/Cargo.toml +++ /dev/null @@ -1,8 +0,0 @@ -[package] -name = "safe_arith" -version = "0.1.0" -authors = ["Michael Sproul "] -edition = { workspace = true } -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] diff --git a/consensus/safe_arith/src/iter.rs b/consensus/safe_arith/src/iter.rs deleted file mode 100644 index d5ee51b588..0000000000 --- a/consensus/safe_arith/src/iter.rs +++ /dev/null @@ -1,70 +0,0 @@ -use crate::{Result, SafeArith}; - -/// Extension trait for iterators, providing a safe replacement for `sum`. -pub trait SafeArithIter { - fn safe_sum(self) -> Result; -} - -impl SafeArithIter for I -where - I: Iterator + Sized, - T: SafeArith, -{ - fn safe_sum(mut self) -> Result { - self.try_fold(T::ZERO, |acc, x| acc.safe_add(x)) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::ArithError; - - #[test] - fn empty_sum() { - let v: Vec = vec![]; - assert_eq!(v.into_iter().safe_sum(), Ok(0)); - } - - #[test] - fn unsigned_sum_small() { - let arr = [400u64, 401, 402, 403, 404, 405, 406]; - assert_eq!( - arr.iter().copied().safe_sum().unwrap(), - arr.iter().copied().sum() - ); - } - - #[test] - fn unsigned_sum_overflow() { - let v = vec![u64::MAX, 1]; - assert_eq!(v.into_iter().safe_sum(), Err(ArithError::Overflow)); - } - - #[test] - fn signed_sum_small() { - let v = vec![-1i64, -2i64, -3i64, 3, 2, 1]; - assert_eq!(v.into_iter().safe_sum(), Ok(0)); - } - - #[test] - fn signed_sum_overflow_above() { - let v = vec![1, 2, 3, 4, i16::MAX, 0, 1, 2, 3]; - assert_eq!(v.into_iter().safe_sum(), Err(ArithError::Overflow)); - } - - #[test] - fn signed_sum_overflow_below() { - let v = vec![i16::MIN, -1]; - assert_eq!(v.into_iter().safe_sum(), 
Err(ArithError::Overflow)); - } - - #[test] - fn signed_sum_almost_overflow() { - let arr = [i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1]; - assert_eq!( - arr.iter().copied().safe_sum().unwrap(), - arr.iter().copied().sum() - ); - } -} diff --git a/consensus/safe_arith/src/lib.rs b/consensus/safe_arith/src/lib.rs deleted file mode 100644 index aa397c0603..0000000000 --- a/consensus/safe_arith/src/lib.rs +++ /dev/null @@ -1,166 +0,0 @@ -//! Library for safe arithmetic on integers, avoiding overflow and division by zero. -mod iter; - -pub use iter::SafeArithIter; - -/// Error representing the failure of an arithmetic operation. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum ArithError { - Overflow, - DivisionByZero, -} - -pub type Result = std::result::Result; - -macro_rules! assign_method { - ($name:ident, $op:ident, $doc_op:expr) => { - assign_method!($name, $op, Self, $doc_op); - }; - ($name:ident, $op:ident, $rhs_ty:ty, $doc_op:expr) => { - #[doc = "Safe variant of `"] - #[doc = $doc_op] - #[doc = "`."] - #[inline] - fn $name(&mut self, other: $rhs_ty) -> Result<()> { - *self = self.$op(other)?; - Ok(()) - } - }; -} - -/// Trait providing safe arithmetic operations for built-in types. -pub trait SafeArith: Sized + Copy { - const ZERO: Self; - const ONE: Self; - - /// Safe variant of `+` that guards against overflow. - fn safe_add(&self, other: Rhs) -> Result; - - /// Safe variant of `-` that guards against overflow. - fn safe_sub(&self, other: Rhs) -> Result; - - /// Safe variant of `*` that guards against overflow. - fn safe_mul(&self, other: Rhs) -> Result; - - /// Safe variant of `/` that guards against division by 0. - fn safe_div(&self, other: Rhs) -> Result; - - /// Safe variant of `%` that guards against division by 0. - fn safe_rem(&self, other: Rhs) -> Result; - - /// Safe variant of `<<` that guards against overflow. - fn safe_shl(&self, other: u32) -> Result; - - /// Safe variant of `>>` that guards against overflow. 
- fn safe_shr(&self, other: u32) -> Result; - - assign_method!(safe_add_assign, safe_add, Rhs, "+="); - assign_method!(safe_sub_assign, safe_sub, Rhs, "-="); - assign_method!(safe_mul_assign, safe_mul, Rhs, "*="); - assign_method!(safe_div_assign, safe_div, Rhs, "/="); - assign_method!(safe_rem_assign, safe_rem, Rhs, "%="); - assign_method!(safe_shl_assign, safe_shl, u32, "<<="); - assign_method!(safe_shr_assign, safe_shr, u32, ">>="); -} - -macro_rules! impl_safe_arith { - ($typ:ty) => { - impl SafeArith for $typ { - const ZERO: Self = 0; - const ONE: Self = 1; - - #[inline] - fn safe_add(&self, other: Self) -> Result { - self.checked_add(other).ok_or(ArithError::Overflow) - } - - #[inline] - fn safe_sub(&self, other: Self) -> Result { - self.checked_sub(other).ok_or(ArithError::Overflow) - } - - #[inline] - fn safe_mul(&self, other: Self) -> Result { - self.checked_mul(other).ok_or(ArithError::Overflow) - } - - #[inline] - fn safe_div(&self, other: Self) -> Result { - self.checked_div(other).ok_or(ArithError::DivisionByZero) - } - - #[inline] - fn safe_rem(&self, other: Self) -> Result { - self.checked_rem(other).ok_or(ArithError::DivisionByZero) - } - - #[inline] - fn safe_shl(&self, other: u32) -> Result { - self.checked_shl(other).ok_or(ArithError::Overflow) - } - - #[inline] - fn safe_shr(&self, other: u32) -> Result { - self.checked_shr(other).ok_or(ArithError::Overflow) - } - } - }; -} - -impl_safe_arith!(u8); -impl_safe_arith!(u16); -impl_safe_arith!(u32); -impl_safe_arith!(u64); -impl_safe_arith!(usize); -impl_safe_arith!(i8); -impl_safe_arith!(i16); -impl_safe_arith!(i32); -impl_safe_arith!(i64); -impl_safe_arith!(isize); - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn basic() { - let x = 10u32; - let y = 11; - assert_eq!(x.safe_add(y), Ok(x + y)); - assert_eq!(y.safe_sub(x), Ok(y - x)); - assert_eq!(x.safe_mul(y), Ok(x * y)); - assert_eq!(x.safe_div(y), Ok(x / y)); - assert_eq!(x.safe_rem(y), Ok(x % y)); - - assert_eq!(x.safe_shl(1), Ok(x 
<< 1)); - assert_eq!(x.safe_shr(1), Ok(x >> 1)); - } - - #[test] - fn mutate() { - let mut x = 0u8; - x.safe_add_assign(2).unwrap(); - assert_eq!(x, 2); - x.safe_sub_assign(1).unwrap(); - assert_eq!(x, 1); - x.safe_shl_assign(1).unwrap(); - assert_eq!(x, 2); - x.safe_mul_assign(3).unwrap(); - assert_eq!(x, 6); - x.safe_div_assign(4).unwrap(); - assert_eq!(x, 1); - x.safe_shr_assign(1).unwrap(); - assert_eq!(x, 0); - } - - #[test] - fn errors() { - assert!(u32::MAX.safe_add(1).is_err()); - assert!(u32::MIN.safe_sub(1).is_err()); - assert!(u32::MAX.safe_mul(2).is_err()); - assert!(u32::MAX.safe_div(0).is_err()); - assert!(u32::MAX.safe_rem(0).is_err()); - assert!(u32::MAX.safe_shl(32).is_err()); - assert!(u32::MAX.safe_shr(32).is_err()); - } -} diff --git a/consensus/state_processing/src/all_caches.rs b/consensus/state_processing/src/all_caches.rs index e49eb395c4..0381bb820f 100644 --- a/consensus/state_processing/src/all_caches.rs +++ b/consensus/state_processing/src/all_caches.rs @@ -1,8 +1,7 @@ use crate::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use crate::epoch_cache::initialize_epoch_cache; -use types::{ - BeaconState, ChainSpec, EpochCacheError, EthSpec, FixedBytesExtended, Hash256, RelativeEpoch, -}; +use tracing::instrument; +use types::{BeaconState, ChainSpec, EpochCacheError, EthSpec, Hash256, RelativeEpoch}; /// Mixin trait for the beacon state that provides operations on *all* caches. 
/// @@ -23,6 +22,7 @@ pub trait AllCaches { } impl AllCaches for BeaconState { + #[instrument(skip_all)] fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), EpochCacheError> { self.build_caches(spec)?; initialize_epoch_cache(self, spec)?; @@ -32,8 +32,7 @@ impl AllCaches for BeaconState { fn all_caches_built(&self) -> bool { let current_epoch = self.current_epoch(); - let Ok(epoch_cache_decision_block_root) = - self.proposer_shuffling_decision_root(Hash256::zero()) + let Ok(epoch_cache_decision_block_root) = self.epoch_cache_decision_root(Hash256::ZERO) else { return false; }; diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index 6654c6a7ef..86db037446 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -123,7 +123,7 @@ pub fn is_epoch_cache_initialized( let current_epoch = state.current_epoch(); let epoch_cache: &EpochCache = state.epoch_cache(); let decision_block_root = state - .proposer_shuffling_decision_root(Hash256::zero()) + .epoch_cache_decision_root(Hash256::zero()) .map_err(EpochCacheError::BeaconState)?; Ok(epoch_cache @@ -146,7 +146,7 @@ pub fn initialize_epoch_cache( let current_epoch = state.current_epoch(); let next_epoch = state.next_epoch().map_err(EpochCacheError::BeaconState)?; let decision_block_root = state - .proposer_shuffling_decision_root(Hash256::zero()) + .epoch_cache_decision_root(Hash256::zero()) .map_err(EpochCacheError::BeaconState)?; state.build_total_active_balance_cache(spec)?; diff --git a/consensus/state_processing/src/upgrade/fulu.rs b/consensus/state_processing/src/upgrade/fulu.rs index 6b038ad73a..c2aced7047 100644 --- a/consensus/state_processing/src/upgrade/fulu.rs +++ b/consensus/state_processing/src/upgrade/fulu.rs @@ -33,9 +33,7 @@ fn initialize_proposer_lookahead( ); } - Vector::new(lookahead).map_err(|e| { - Error::PleaseNotifyTheDevs(format!("Failed to initialize proposer lookahead: 
{:?}", e)) - }) + Vector::new(lookahead).map_err(|e| e.into()) } pub fn upgrade_state_to_fulu( diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index f4e4e36966..61c32dd4ac 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -843,6 +843,7 @@ impl<'de, E: EthSpec, Payload: AbstractExecPayload> ContextDeserialize<'de, F } } +#[derive(Clone, Copy)] pub enum BlockImportSource { Gossip, Lookup, diff --git a/consensus/types/src/beacon_response.rs b/consensus/types/src/beacon_response.rs index 2e45854364..fc59fc9432 100644 --- a/consensus/types/src/beacon_response.rs +++ b/consensus/types/src/beacon_response.rs @@ -25,6 +25,7 @@ pub struct ForkVersionedResponse { /// `Deserialize`. #[derive(Debug, PartialEq, Clone, Serialize)] pub struct UnversionedResponse { + #[serde(flatten)] pub metadata: M, pub data: T, } @@ -195,9 +196,10 @@ impl From> for BeaconResponse { #[cfg(test)] mod fork_version_response_tests { + use crate::beacon_response::ExecutionOptimisticFinalizedMetadata; use crate::{ ExecutionPayload, ExecutionPayloadBellatrix, ForkName, ForkVersionedResponse, - MainnetEthSpec, + MainnetEthSpec, UnversionedResponse, }; use serde_json::json; @@ -236,4 +238,24 @@ mod fork_version_response_tests { assert!(result.is_err()); } + + // The following test should only pass by having the attribute #[serde(flatten)] on the metadata + #[test] + fn unversioned_response_serialize_dezerialize_round_trip_test() { + // Create an UnversionedResponse with some data + let data = UnversionedResponse { + metadata: ExecutionOptimisticFinalizedMetadata { + execution_optimistic: Some(false), + finalized: Some(false), + }, + data: "some_test_data".to_string(), + }; + + let serialized = serde_json::to_string(&data); + + let deserialized = + serde_json::from_str(&serialized.unwrap()).expect("Failed to deserialize"); + + assert_eq!(data, deserialized); + } } diff --git a/consensus/types/src/beacon_state.rs 
b/consensus/types/src/beacon_state.rs index d2efbfe909..1bd4927fe8 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -173,7 +173,21 @@ pub enum Error { AggregatorNotInCommittee { aggregator_index: u64, }, - PleaseNotifyTheDevs(String), + ComputeProposerIndicesPastEpoch { + current_epoch: Epoch, + request_epoch: Epoch, + }, + ComputeProposerIndicesInsufficientLookahead { + current_epoch: Epoch, + request_epoch: Epoch, + }, + ComputeProposerIndicesExcessiveLookahead { + current_epoch: Epoch, + request_epoch: Epoch, + }, + ProposerLookaheadOutOfBounds { + i: usize, + }, } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. @@ -578,6 +592,7 @@ where #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Fulu, Gloas))] + #[serde(with = "ssz_types::serde_utils::quoted_u64_fixed_vec")] pub proposer_lookahead: Vector, // Gloas @@ -886,8 +901,9 @@ impl BeaconState { &self, epoch: Epoch, block_root: Hash256, + spec: &ChainSpec, ) -> Result { - let decision_slot = self.proposer_shuffling_decision_slot(epoch); + let decision_slot = spec.proposer_shuffling_decision_slot::(epoch); if self.slot() <= decision_slot { Ok(block_root) } else { @@ -902,19 +918,18 @@ impl BeaconState { /// /// The `block_root` covers the one-off scenario where the genesis block decides its own /// shuffling. It should be set to the latest block applied to `self` or the genesis block root. 
- pub fn proposer_shuffling_decision_root(&self, block_root: Hash256) -> Result { - let decision_slot = self.proposer_shuffling_decision_slot(self.current_epoch()); - if self.slot() == decision_slot { - Ok(block_root) - } else { - self.get_block_root(decision_slot).copied() - } + pub fn proposer_shuffling_decision_root( + &self, + block_root: Hash256, + spec: &ChainSpec, + ) -> Result { + self.proposer_shuffling_decision_root_at_epoch(self.current_epoch(), block_root, spec) } - /// Returns the slot at which the proposer shuffling was decided. The block root at this slot - /// can be used to key the proposer shuffling for the given epoch. - fn proposer_shuffling_decision_slot(&self, epoch: Epoch) -> Slot { - epoch.start_slot(E::slots_per_epoch()).saturating_sub(1_u64) + pub fn epoch_cache_decision_root(&self, block_root: Hash256) -> Result { + // Epoch cache decision root for the current epoch (N) is the block root at the end of epoch + // N - 1. This is the same as the root that determines the next epoch attester shuffling. + self.attester_shuffling_decision_root(block_root, RelativeEpoch::Next) } /// Returns the block root which decided the attester shuffling for the given `relative_epoch`. @@ -998,6 +1013,45 @@ impl BeaconState { indices: &[usize], spec: &ChainSpec, ) -> Result, Error> { + // Regardless of fork, we never support computing proposer indices for past epochs. + let current_epoch = self.current_epoch(); + if epoch < current_epoch { + return Err(Error::ComputeProposerIndicesPastEpoch { + current_epoch, + request_epoch: epoch, + }); + } + + if spec.fork_name_at_epoch(epoch).fulu_enabled() { + // Post-Fulu we must never compute proposer indices using insufficient lookahead. This + // would be very dangerous as it would lead to conflicts between the *true* proposer as + // defined by `self.proposer_lookahead` and the output of this function. 
+ // With MIN_SEED_LOOKAHEAD=1 (common config), this is equivalent to checking that the + // requested epoch is not the current epoch. + // + // We do not run this check if this function is called from `upgrade_to_fulu`, + // which runs *after* the slot is incremented, and needs to compute the proposer + // shuffling for the epoch that was just transitioned into. + if self.fork_name_unchecked().fulu_enabled() + && epoch < current_epoch.safe_add(spec.min_seed_lookahead)? + { + return Err(Error::ComputeProposerIndicesInsufficientLookahead { + current_epoch, + request_epoch: epoch, + }); + } + } else { + // Pre-Fulu the situation is reversed, we *should not* compute proposer indices using + // too much lookahead. To do so would make us vulnerable to changes in the proposer + // indices caused by effective balance changes. + if epoch >= current_epoch.safe_add(spec.min_seed_lookahead)? { + return Err(Error::ComputeProposerIndicesExcessiveLookahead { + current_epoch, + request_epoch: epoch, + }); + } + } + epoch .slot_iter(E::slots_per_epoch()) .map(|slot| { @@ -1146,10 +1200,7 @@ impl BeaconState { let index = slot.as_usize().safe_rem(E::slots_per_epoch() as usize)?; proposer_lookahead .get(index) - .ok_or(Error::PleaseNotifyTheDevs(format!( - "Proposer lookahead out of bounds: {} for slot: {}", - index, slot - ))) + .ok_or(Error::ProposerLookaheadOutOfBounds { i: index }) .map(|index| *index as usize) } else { // Pre-Fulu @@ -1168,6 +1219,25 @@ impl BeaconState { epoch: Epoch, spec: &ChainSpec, ) -> Result, Error> { + // This isn't in the spec, but we remove the footgun that is requesting the current epoch + // for a Fulu state. + if let Ok(proposer_lookahead) = self.proposer_lookahead() + && epoch >= self.current_epoch() + && epoch <= self.next_epoch()? + { + let slots_per_epoch = E::slots_per_epoch() as usize; + let start_offset = if epoch == self.current_epoch() { + 0 + } else { + slots_per_epoch + }; + return Ok(proposer_lookahead + .iter_from(start_offset)? 
+ .take(slots_per_epoch) + .map(|x| *x as usize) + .collect()); + } + // Not using the cached validator indices since they are shuffled. let indices = self.get_active_validator_indices(epoch, spec)?; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index a1005d904a..50a2f268e0 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -227,7 +227,7 @@ pub struct ChainSpec { pub ttfb_timeout: u64, pub resp_timeout: u64, pub attestation_propagation_slot_range: u64, - pub maximum_gossip_clock_disparity_millis: u64, + pub maximum_gossip_clock_disparity: u64, pub message_domain_invalid_snappy: [u8; 4], pub message_domain_valid_snappy: [u8; 4], pub subnets_per_node: u8, @@ -670,7 +670,7 @@ impl ChainSpec { } pub fn maximum_gossip_clock_disparity(&self) -> Duration { - Duration::from_millis(self.maximum_gossip_clock_disparity_millis) + Duration::from_millis(self.maximum_gossip_clock_disparity) } pub fn ttfb_timeout(&self) -> Duration { @@ -865,6 +865,34 @@ impl ChainSpec { ) } + /// Returns the slot at which the proposer shuffling was decided. + /// + /// The block root at this slot can be used to key the proposer shuffling for the given epoch. + pub fn proposer_shuffling_decision_slot(&self, epoch: Epoch) -> Slot { + // At the Fulu fork epoch itself, the shuffling is computed "the old way" with no lookahead. + // Therefore for `epoch == fulu_fork_epoch` we must take the `else` branch. Checking if Fulu + // is enabled at `epoch - 1` accomplishes this neatly. + if self + .fork_name_at_epoch(epoch.saturating_sub(1_u64)) + .fulu_enabled() + { + // Post-Fulu the proposer shuffling decision slot for epoch N is the slot at the end + // of epoch N - 2 (note: min_seed_lookahead=1 in all current configs). 
+ epoch + .saturating_sub(self.min_seed_lookahead) + .start_slot(E::slots_per_epoch()) + .saturating_sub(1_u64) + } else { + // Pre-Fulu the proposer shuffling decision slot for epoch N is the slot at the end of + // epoch N - 1 (note: +1 -1 for min_seed_lookahead=1 in all current configs). + epoch + .saturating_add(Epoch::new(1)) + .saturating_sub(self.min_seed_lookahead) + .start_slot(E::slots_per_epoch()) + .saturating_sub(1_u64) + } + } + /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. pub fn mainnet() -> Self { Self { @@ -1084,7 +1112,7 @@ impl ChainSpec { attestation_propagation_slot_range: default_attestation_propagation_slot_range(), attestation_subnet_count: 64, subnets_per_node: 2, - maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), + maximum_gossip_clock_disparity: default_maximum_gossip_clock_disparity(), target_aggregators_per_committee: 16, max_payload_size: default_max_payload_size(), min_epochs_for_block_requests: default_min_epochs_for_block_requests(), @@ -1430,7 +1458,7 @@ impl ChainSpec { attestation_propagation_slot_range: default_attestation_propagation_slot_range(), attestation_subnet_count: 64, subnets_per_node: 4, // Make this larger than usual to avoid network damage - maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), + maximum_gossip_clock_disparity: default_maximum_gossip_clock_disparity(), target_aggregators_per_committee: 16, max_payload_size: default_max_payload_size(), min_epochs_for_block_requests: 33024, @@ -1751,9 +1779,9 @@ pub struct Config { #[serde(default = "default_attestation_propagation_slot_range")] #[serde(with = "serde_utils::quoted_u64")] attestation_propagation_slot_range: u64, - #[serde(default = "default_maximum_gossip_clock_disparity_millis")] + #[serde(default = "default_maximum_gossip_clock_disparity")] #[serde(with = "serde_utils::quoted_u64")] - maximum_gossip_clock_disparity_millis: u64, + 
maximum_gossip_clock_disparity: u64, #[serde(default = "default_message_domain_invalid_snappy")] #[serde(with = "serde_utils::bytes_4_hex")] message_domain_invalid_snappy: [u8; 4], @@ -1967,7 +1995,7 @@ const fn default_attestation_propagation_slot_range() -> u64 { 32 } -const fn default_maximum_gossip_clock_disparity_millis() -> u64 { +const fn default_maximum_gossip_clock_disparity() -> u64 { 500 } @@ -2186,7 +2214,7 @@ impl Config { ttfb_timeout: spec.ttfb_timeout, resp_timeout: spec.resp_timeout, attestation_propagation_slot_range: spec.attestation_propagation_slot_range, - maximum_gossip_clock_disparity_millis: spec.maximum_gossip_clock_disparity_millis, + maximum_gossip_clock_disparity: spec.maximum_gossip_clock_disparity, message_domain_invalid_snappy: spec.message_domain_invalid_snappy, message_domain_valid_snappy: spec.message_domain_valid_snappy, max_request_blocks_deneb: spec.max_request_blocks_deneb, @@ -2274,7 +2302,7 @@ impl Config { message_domain_valid_snappy, max_request_blocks, attestation_propagation_slot_range, - maximum_gossip_clock_disparity_millis, + maximum_gossip_clock_disparity, max_request_blocks_deneb, max_request_blob_sidecars, max_request_data_column_sidecars, @@ -2350,7 +2378,7 @@ impl Config { attestation_subnet_prefix_bits, max_request_blocks, attestation_propagation_slot_range, - maximum_gossip_clock_disparity_millis, + maximum_gossip_clock_disparity, max_request_blocks_deneb, max_request_blob_sidecars, max_request_data_column_sidecars, @@ -2977,4 +3005,32 @@ mod yaml_tests { spec.min_epoch_data_availability_boundary(current_epoch) ); } + + #[test] + fn proposer_shuffling_decision_root_around_epoch_boundary() { + type E = MainnetEthSpec; + let fulu_fork_epoch = 5; + let spec = { + let mut spec = ForkName::Electra.make_genesis_spec(E::default_spec()); + spec.fulu_fork_epoch = Some(Epoch::new(fulu_fork_epoch)); + Arc::new(spec) + }; + + // For epochs prior to AND including the Fulu fork epoch, the decision slot is the end + // of the 
previous epoch (i.e. only 1 slot lookahead). + for epoch in (0..=fulu_fork_epoch).map(Epoch::new) { + assert_eq!( + spec.proposer_shuffling_decision_slot::(epoch), + epoch.start_slot(E::slots_per_epoch()) - 1 + ); + } + + // For epochs after Fulu, the decision slot is the end of the epoch two epochs prior. + for epoch in ((fulu_fork_epoch + 1)..(fulu_fork_epoch + 10)).map(Epoch::new) { + assert_eq!( + spec.proposer_shuffling_decision_slot::(epoch), + (epoch - 1).start_slot(E::slots_per_epoch()) - 1 + ); + } + } } diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 57f7a88e19..2272b1695c 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -143,6 +143,7 @@ pub enum DataColumnSidecarError { PreDeneb, SszError(SszError), BuildSidecarFailed(String), + InvalidCellProofLength { expected: usize, actual: usize }, } impl From for DataColumnSidecarError { diff --git a/consensus/types/src/epoch_cache.rs b/consensus/types/src/epoch_cache.rs index ef91c20d75..9956cb400a 100644 --- a/consensus/types/src/epoch_cache.rs +++ b/consensus/types/src/epoch_cache.rs @@ -5,9 +5,13 @@ use std::sync::Arc; /// Cache of values which are uniquely determined at the start of an epoch. /// /// The values are fixed with respect to the last block of the _prior_ epoch, which we refer -/// to as the "decision block". This cache is very similar to the `BeaconProposerCache` in that -/// beacon proposers are determined at exactly the same time as the values in this cache, so -/// the keys for the two caches are identical. +/// to as the "decision block". +/// +/// Prior to Fulu this cache was similar to the `BeaconProposerCache` in that beacon proposers were +/// determined at exactly the same time as the values in this cache, so the keys for the two caches +/// were identical. +/// +/// Post-Fulu, we use a different key (the proposers have more lookahead). 
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Eq, Clone, Default)] pub struct EpochCache { diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index f12b14ff6e..338e2b1e75 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -51,7 +51,7 @@ impl ForkName { /// This fork serves as the baseline for many tests, and the goal /// is to ensure features are passing on this fork. pub fn latest_stable() -> ForkName { - ForkName::Electra + ForkName::Fulu } /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` @@ -201,6 +201,46 @@ impl ForkName { pub fn gloas_enabled(self) -> bool { self >= ForkName::Gloas } + + pub fn fork_ascii(self) { + if self == ForkName::Fulu { + println!( + r#" + ╔═══════════════════════════════════════╗ + ║ ║ + ║ TO FULU, MOAR BLOBS TO ETHEREUM ║ + ║ ║ + ║ III DECEMBER MMXXV ║ + ║ ║ + ╚═══════════════════════════════════════╝ + + ============================================================================= + |||| |||| + |---------------------------------------------------------------------------| + |___-----___-----___-----___-----___-----___-----___-----___-----___-----___| + / _ \===/ _ \ / _ \===/ _ \ / _ \===/ _ \ / _ \===/ _ \ + ( (.\ oOo /.) ) ( (.\ oOo /.) ) ( (.\ oOo /.) ) ( (.\ oOo /.) ) + \__/=====\__/ \__/=====\__/ \__/=====\__/ \__/=====\__/ + ||||||| ||||||| ||||||| ||||||| + ||||||| ||||||| \\/), ||||||| ||||||| + ||||||| ||||||| ,'.' /, ||||||| ||||||| + ||||||| ||||||| (_)- / /, ||||||| ||||||| + ||||||| ||||||| /\_/ |__..--, * ||||||| ||||||| + ||||||| ||||||| (\___/\ \ \ / ).' ||||||| ||||||| + ||||||| ||||||| \____/ / (_ // ||||||| ||||||| + ||||||| ||||||| \\_ ,'--'\_( ||||||| ||||||| + (oOoOo) (oOoOo) )_)_/ )_/ )_) (oOoOo) (oOoOo) + J%%%%%L J%%%%%L (_(_.'(_.'(_.' 
J%%%%%L J%%%%%L + ZZZZZZZZZ ZZZZZZZZZ ZZZZZZZZZ ZZZZZZZZZ + =========================================================================== + |_________________________________________________________________________| + |___________________________________________________________________________| + |_____________________________________________________________________________| + |_______________________________________________________________________________| + "# + ); + } + } } /// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index c31183192f..ab54c0345f 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -208,6 +208,8 @@ pub struct DenebPreset { #[serde(with = "serde_utils::quoted_u64")] pub max_blob_commitments_per_block: u64, #[serde(with = "serde_utils::quoted_u64")] + pub kzg_commitment_inclusion_proof_depth: u64, + #[serde(with = "serde_utils::quoted_u64")] pub field_elements_per_blob: u64, } @@ -215,6 +217,7 @@ impl DenebPreset { pub fn from_chain_spec(_spec: &ChainSpec) -> Self { Self { max_blob_commitments_per_block: E::max_blob_commitments_per_block() as u64, + kzg_commitment_inclusion_proof_depth: E::KzgCommitmentInclusionProofDepth::to_u64(), field_elements_per_blob: E::field_elements_per_blob() as u64, } } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 2eed9da4c0..e5ed7a8926 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.1.0" +version = "8.0.0-rc.1" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs index 2e36eadf23..6f7dcdb595 100644 --- a/lcli/src/http_sync.rs +++ b/lcli/src/http_sync.rs @@ -124,7 +124,7 @@ async fn get_block_from_source( .unwrap() .unwrap(); let blobs_from_source = source - .get_blobs::(block_id, None, spec) + 
.get_blob_sidecars::(block_id, None, spec) .await .unwrap() .unwrap() diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index bf8241f8a2..ef680c9b96 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.1.0" +version = "8.0.0-rc.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 8660074e91..c93016a0f5 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -28,7 +28,7 @@ use std::path::PathBuf; use std::process::exit; use std::sync::LazyLock; use task_executor::ShutdownReason; -use tracing::{Level, info, warn}; +use tracing::{Level, info}; use tracing_subscriber::{Layer, filter::EnvFilter, layer::SubscriberExt, util::SubscriberInitExt}; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; @@ -126,16 +126,6 @@ fn main() { .global(true) .display_order(0), ) - .arg( - Arg::new("logfile") - .long("logfile") - .value_name("PATH") - .help("DEPRECATED") - .action(ArgAction::Set) - .global(true) - .hide(true) - .display_order(0) - ) .arg( Arg::new("logfile-dir") .long("logfile-dir") @@ -385,48 +375,6 @@ fn main() { .global(true) .display_order(0) ) - .arg( - Arg::new("terminal-total-difficulty-override") - .long("terminal-total-difficulty-override") - .value_name("INTEGER") - .help("DEPRECATED") - .action(ArgAction::Set) - .global(true) - .display_order(0) - .hide(true) - ) - .arg( - Arg::new("terminal-block-hash-override") - .long("terminal-block-hash-override") - .value_name("TERMINAL_BLOCK_HASH") - .help("DEPRECATED") - .requires("terminal-block-hash-epoch-override") - .action(ArgAction::Set) - .global(true) - .display_order(0) - .hide(true) - ) - .arg( - Arg::new("terminal-block-hash-epoch-override") - .long("terminal-block-hash-epoch-override") - .value_name("EPOCH") - .help("DEPRECATED") - .requires("terminal-block-hash-override") - .action(ArgAction::Set) 
- .global(true) - .display_order(0) - .hide(true) - ) - .arg( - Arg::new("safe-slots-to-import-optimistically") - .long("safe-slots-to-import-optimistically") - .value_name("INTEGER") - .help("DEPRECATED") - .action(ArgAction::Set) - .global(true) - .display_order(0) - .hide(true) - ) .arg( Arg::new("genesis-state-url") .long("genesis-state-url") @@ -780,11 +728,6 @@ fn run( // Allow Prometheus access to the version and commit of the Lighthouse build. metrics::expose_lighthouse_version(); - // DEPRECATED: can be removed in v7.2.0/v8.0.0. - if clap_utils::parse_optional::(matches, "logfile")?.is_some() { - warn!("The --logfile flag is deprecated and replaced by --logfile-dir"); - } - #[cfg(all(feature = "modern", target_arch = "x86_64"))] if !std::is_x86_feature_detected!("adx") { tracing::warn!( @@ -793,20 +736,6 @@ fn run( ); } - // Warn for DEPRECATED global flags. This code should be removed when we finish deleting these - // flags. - let deprecated_flags = [ - "terminal-total-difficulty-override", - "terminal-block-hash-override", - "terminal-block-hash-epoch-override", - "safe-slots-to-import-optimistically", - ]; - for flag in deprecated_flags { - if matches.get_one::(flag).is_some() { - warn!("The {} flag is deprecated and does nothing", flag); - } - } - // Note: the current code technically allows for starting a beacon node _and_ a validator // client at the same time. // diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 1fd3cc1b79..5a057d7d7f 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -392,27 +392,35 @@ fn genesis_backfill_with_historic_flag() { .with_config(|config| assert!(config.chain.genesis_backfill)); } -// Tests for Eth1 flags. 
-// DEPRECATED but should not crash #[test] -fn eth1_blocks_per_log_query_flag() { +fn complete_blob_backfill_default() { CommandLineTest::new() - .flag("eth1-blocks-per-log-query", Some("500")) - .run_with_zero_port(); + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.complete_blob_backfill)); } -// DEPRECATED but should not crash + #[test] -fn eth1_purge_cache_flag() { +fn complete_blob_backfill_flag() { CommandLineTest::new() - .flag("eth1-purge-cache", None) - .run_with_zero_port(); + .flag("complete-blob-backfill", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.chain.complete_blob_backfill); + assert!(!config.store.prune_blobs); + }); } -// DEPRECATED but should not crash + +// Even if `--prune-blobs true` is provided, `--complete-blob-backfill` should override it to false. #[test] -fn eth1_cache_follow_distance_manual() { +fn complete_blob_backfill_and_prune_blobs_true() { CommandLineTest::new() - .flag("eth1-cache-follow-distance", Some("128")) - .run_with_zero_port(); + .flag("complete-blob-backfill", None) + .flag("prune-blobs", Some("true")) + .run_with_zero_port() + .with_config(|config| { + assert!(config.chain.complete_blob_backfill); + assert!(!config.store.prune_blobs); + }); } // Tests for Bellatrix flags. @@ -750,31 +758,6 @@ fn jwt_optional_flags() { fn jwt_optional_alias_flags() { run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version"); } -// DEPRECATED. This flag is deprecated but should not cause a crash. -#[test] -fn terminal_total_difficulty_override_flag() { - CommandLineTest::new() - .flag("terminal-total-difficulty-override", Some("1337424242")) - .run_with_zero_port(); -} -// DEPRECATED. This flag is deprecated but should not cause a crash. 
-#[test] -fn terminal_block_hash_and_activation_epoch_override_flags() { - CommandLineTest::new() - .flag("terminal-block-hash-epoch-override", Some("1337")) - .flag( - "terminal-block-hash-override", - Some("0x4242424242424242424242424242424242424242424242424242424242424242"), - ) - .run_with_zero_port(); -} -// DEPRECATED. This flag is deprecated but should not cause a crash. -#[test] -fn safe_slots_to_import_optimistically_flag() { - CommandLineTest::new() - .flag("safe-slots-to-import-optimistically", Some("421337")) - .run_with_zero_port(); -} // Tests for Network flags. #[test] @@ -802,6 +785,19 @@ fn network_subscribe_all_data_column_subnets_flag() { .with_config(|config| assert!(config.network.subscribe_all_data_column_subnets)); } #[test] +fn network_supernode_flag() { + CommandLineTest::new() + .flag("supernode", None) + .run_with_zero_port() + .with_config(|config| assert!(config.network.subscribe_all_data_column_subnets)); +} +#[test] +fn network_subscribe_all_data_column_subnets_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(!config.network.subscribe_all_data_column_subnets)); +} +#[test] fn blob_publication_batches() { CommandLineTest::new() .flag("blob-publication-batches", Some("3")) @@ -1808,12 +1804,25 @@ fn slots_per_restore_point_flag() { .run_with_zero_port(); } +#[test] +fn block_cache_size_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.block_cache_size, 0)); +} #[test] fn block_cache_size_flag() { CommandLineTest::new() .flag("block-cache-size", Some("4")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.block_cache_size, new_non_zero_usize(4))); + .with_config(|config| assert_eq!(config.store.block_cache_size, 4)); +} +#[test] +fn block_cache_size_zero() { + CommandLineTest::new() + .flag("block-cache-size", Some("0")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.block_cache_size, 0)); } 
#[test] fn state_cache_size_default() { @@ -2466,42 +2475,6 @@ fn logfile_format_flag() { ) }); } -// DEPRECATED but should not crash. -#[test] -fn deprecated_logfile() { - CommandLineTest::new() - .flag("logfile", Some("test.txt")) - .run_with_zero_port(); -} - -// DEPRECATED but should not crash. -#[test] -fn sync_eth1_chain_disable_deposit_contract_sync_flag() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new_with_no_execution_endpoint() - .flag("disable-deposit-contract-sync", None) - .flag("execution-endpoints", Some("http://localhost:8551/")) - .flag( - "execution-jwt", - dir.path().join("jwt-file").as_os_str().to_str(), - ) - .run_with_zero_port(); -} - -#[test] -#[should_panic] -fn disable_deposit_contract_sync_conflicts_with_staking() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new_with_no_execution_endpoint() - .flag("disable-deposit-contract-sync", None) - .flag("staking", None) - .flag("execution-endpoints", Some("http://localhost:8551/")) - .flag( - "execution-jwt", - dir.path().join("jwt-file").as_os_str().to_str(), - ) - .run_with_zero_port(); -} #[test] fn light_client_server_default() { @@ -2516,7 +2489,6 @@ fn light_client_server_default() { #[test] fn light_client_server_enabled() { CommandLineTest::new() - .flag("light-client-server", None) .run_with_zero_port() .with_config(|config| { assert!(config.network.enable_light_client_server); diff --git a/scripts/print_release_diffs.py b/scripts/print_release_diffs.py new file mode 100644 index 0000000000..d910b1be5b --- /dev/null +++ b/scripts/print_release_diffs.py @@ -0,0 +1,72 @@ +""" +Summarise pull requests between two Lighthouse releases. + +Usage: + export GITHUB_TOKEN=your_token + python -m pip install requests==2.32.4 + python print_release_diffs.py --base v7.0.1 --head release-v7.1.0 + +Shows commit SHA, PR number, 'backwards-incompat' label status, and PR title. 
+""" + +import requests +import re +import argparse +import os + +GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN") +if not GITHUB_TOKEN: + raise SystemExit("Error: Please set the GITHUB_TOKEN environment variable.") + +parser = argparse.ArgumentParser(description="Summarise PRs between two Lighthouse versions.") +parser.add_argument("--base", required=True, help="Base tag or branch (older release)") +parser.add_argument("--head", required=True, help="Head tag or branch (newer release)") +args = parser.parse_args() + +BASE = args.base +HEAD = args.head +OWNER = 'sigp' +REPO = 'lighthouse' + +HEADERS = { + 'Authorization': f'token {GITHUB_TOKEN}', + 'Accept': 'application/vnd.github+json' +} + +def get_commits_between(base, head): + url = f'https://api.github.com/repos/{OWNER}/{REPO}/compare/{base}...{head}' + response = requests.get(url, headers=HEADERS) + response.raise_for_status() + return response.json()['commits'] + +def has_backwards_incompat_label(pr_number): + url = f'https://api.github.com/repos/{OWNER}/{REPO}/issues/{pr_number}' + response = requests.get(url, headers=HEADERS) + if response.status_code != 200: + raise Exception(f"Failed to fetch PR #{pr_number}") + labels = response.json().get('labels', []) + return any(label['name'] == 'backwards-incompat' for label in labels) + +def main(): + commits = get_commits_between(BASE, HEAD) + print(" # Commit SHA PR Number Has backwards-incompat Label PR Title") + print("--- ------------ ----------- ------------------------------ --------------------------------------------") + + for i, commit in enumerate(commits, 1): + sha = commit['sha'][:12] + message = commit['commit']['message'] + pr_match = re.search(r"\(#(\d+)\)", message) + + if not pr_match: + print(f"{i:<3} {sha} {'-':<11} {'-':<30} [NO PR MATCH]: {message.splitlines()[0]}") + continue + + pr_number = int(pr_match.group(1)) + try: + has_label = has_backwards_incompat_label(pr_number) + print(f"{i:<3} {sha} {pr_number:<11} {str(has_label):<30} 
{message.splitlines()[0]}") + except Exception as e: + print(f"{i:<3} {sha} {pr_number:<11} {'ERROR':<30} [ERROR FETCHING PR]: {e}") + +if __name__ == '__main__': + main() diff --git a/scripts/tests/checkpoint-sync-config-devnet.yaml b/scripts/tests/checkpoint-sync-config-devnet.yaml index f1b96dc9e5..2392011ed3 100644 --- a/scripts/tests/checkpoint-sync-config-devnet.yaml +++ b/scripts/tests/checkpoint-sync-config-devnet.yaml @@ -4,11 +4,15 @@ participants: cl_image: lighthouse:local el_type: geth el_image: ethpandaops/geth:master + cl_extra_params: + - --disable-backfill-rate-limiting supernode: true - cl_type: lighthouse cl_image: lighthouse:local el_type: geth el_image: ethpandaops/geth:master + cl_extra_params: + - --disable-backfill-rate-limiting supernode: false checkpoint_sync_enabled: true diff --git a/scripts/tests/checkpoint-sync.sh b/scripts/tests/checkpoint-sync.sh index a170d1e94d..df03da042e 100755 --- a/scripts/tests/checkpoint-sync.sh +++ b/scripts/tests/checkpoint-sync.sh @@ -15,7 +15,7 @@ CONFIG=${2:-$SCRIPT_DIR/checkpoint-sync-config-sepolia.yaml} # Interval for polling the /lighthouse/syncing endpoint for sync status POLL_INTERVAL_SECS=5 # Target number of slots to backfill to complete this test. -TARGET_BACKFILL_SLOTS=1024 +TARGET_BACKFILL_SLOTS=256 # Timeout for this test, if the node(s) fail to backfill `TARGET_BACKFILL_SLOTS` slots, fail the test. 
TIMEOUT_MINS=10 TIMEOUT_SECS=$((TIMEOUT_MINS * 60)) diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index 78d802c228..54efb9f9ce 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -60,7 +60,7 @@ impl Case for ForkTest { fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { let mut result_state = self.pre.clone(); let mut expected = Some(self.post.clone()); - let spec = &E::default_spec(); + let spec = &fork_name.make_genesis_spec(E::default_spec()); let mut result = match fork_name { ForkName::Base => panic!("phase0 not supported"), diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 1368c495cd..1240785121 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -424,7 +424,7 @@ pub async fn verify_full_blob_production_up_to( // the `verify_full_block_production_up_to` function. if block.is_some() { remote_node - .get_blobs::(BlockId::Slot(Slot::new(slot)), None, &E::default_spec()) + .get_blobs::(BlockId::Slot(Slot::new(slot)), None) .await .map_err(|e| format!("Failed to get blobs at slot {slot:?}: {e:?}"))? .ok_or_else(|| format!("No blobs available at slot {slot:?}"))?; diff --git a/wordlist.txt b/wordlist.txt index 57674cf974..58c4cf6db1 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -39,6 +39,7 @@ EthStaker Exercism Extractable FFG +Fulu Geth GiB Gitcoin