diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index cd45bd6d98..e768208973 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -13,8 +13,8 @@ concurrency: cancel-in-progress: true env: - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DH_KEY }} + DOCKER_USERNAME: ${{ secrets.DH_ORG }} # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index bcade948d7..1cd2f24548 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -36,12 +36,11 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Install dependencies + - name: Install Kurtosis run: | - sudo add-apt-repository ppa:rmescandon/yq echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update - sudo apt install -y kurtosis-cli yq + sudo apt install -y kurtosis-cli kurtosis analytics disable - name: Download Docker image artifact @@ -83,12 +82,11 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Install dependencies + - name: Install Kurtosis run: | - sudo add-apt-repository ppa:rmescandon/yq echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update - sudo apt install -y kurtosis-cli yq + sudo apt install -y kurtosis-cli kurtosis analytics disable - name: Download Docker image artifact @@ -119,12 +117,11 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Install dependencies + - name: Install Kurtosis run: | - sudo add-apt-repository ppa:rmescandon/yq echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update - sudo apt install -y kurtosis-cli yq + sudo apt install -y kurtosis-cli 
kurtosis analytics disable - name: Download Docker image artifact diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f1ec2e4655..cfba601fad 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,8 +10,8 @@ concurrency: cancel-in-progress: true env: - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DH_KEY }} + DOCKER_USERNAME: ${{ secrets.DH_ORG }} REPO_NAME: ${{ github.repository_owner }}/lighthouse IMAGE_NAME: ${{ github.repository_owner }}/lighthouse # Enable self-hosted runners for the sigp repo only. diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 7cda3e477d..45f3b757e7 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -63,8 +63,8 @@ jobs: - name: Checkout repository uses: actions/checkout@v3 - name: Install dependencies - run: apt update && apt install -y cmake - - name: Generate code coverage + run: apt update && apt install -y cmake libclang-dev + - name: Check for deadlocks run: | cargo lockbud -k deadlock -b -l tokio_util @@ -83,6 +83,11 @@ jobs: runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 + # Set Java version to 21. (required since Web3Signer 24.12.0). 
+ - uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '21' - name: Get latest version of stable Rust if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 @@ -345,7 +350,7 @@ jobs: - name: Check formatting with cargo fmt run: make cargo-fmt - name: Lint code for quality and style with Clippy - run: make lint + run: make lint-full - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock - name: Typecheck benchmark code without running it @@ -358,6 +363,8 @@ jobs: run: CARGO_HOME=$(readlink -f $HOME) make vendor - name: Markdown-linter run: make mdlint + - name: Spell-check + uses: rojopolis/spellcheck-github-actions@v0 check-msrv: name: check-msrv runs-on: ubuntu-latest @@ -420,7 +427,22 @@ jobs: channel: stable cache-target: release - name: Run Makefile to trigger the bash script - run: make cli + run: make cli-local + cargo-sort: + name: cargo-sort + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-sort + - name: Run cargo sort to check if Cargo.toml files are sorted + run: cargo sort --check --workspace # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. 
test-suite-success: @@ -448,6 +470,7 @@ jobs: 'compile-with-beta-compiler', 'cli-check', 'lockbud', + 'cargo-sort', ] steps: - uses: actions/checkout@v4 diff --git a/.spellcheck.yml b/.spellcheck.yml new file mode 100644 index 0000000000..692bc4d176 --- /dev/null +++ b/.spellcheck.yml @@ -0,0 +1,35 @@ +matrix: +- name: Markdown + sources: + - './book/**/*.md' + - 'README.md' + - 'CONTRIBUTING.md' + - 'SECURITY.md' + - './scripts/local_testnet/README.md' + default_encoding: utf-8 + aspell: + lang: en + dictionary: + wordlists: + - wordlist.txt + encoding: utf-8 + pipeline: + - pyspelling.filters.url: + - pyspelling.filters.markdown: + markdown_extensions: + - pymdownx.superfences: + - pymdownx.highlight: + - pymdownx.striphtml: + - pymdownx.magiclink: + - pyspelling.filters.html: + comments: false + ignores: + - code + - pre + - pyspelling.filters.context: + context_visible_first: true + delimiters: + # Ignore hex strings + - open: '0x[a-fA-F0-9]' + close: '[^a-fA-F0-9]' + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3c53558a10..4cad219c89 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -85,7 +85,7 @@ steps: 5. Commit your changes and push them to your fork with `$ git push origin your_feature_name`. 6. Go to your fork on github.com and use the web interface to create a pull - request into the sigp/lighthouse repo. + request into the sigp/lighthouse repository. From there, the repository maintainers will review the PR and either accept it or provide some constructive feedback. 
diff --git a/Cargo.lock b/Cargo.lock index b12df12265..c62e9fbc87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -36,6 +36,7 @@ dependencies = [ "tokio", "types", "validator_dir", + "zeroize", ] [[package]] @@ -59,19 +60,13 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -155,15 +150,15 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" [[package]] name = "alloy-consensus" -version = "0.3.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4177d135789e282e925092be8939d421b701c6d92c0a16679faa659d9166289d" +checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -183,9 +178,9 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d319bb544ca6caeab58c39cea8921c55d924d4f68f2c60f24f914673f9a74a" +checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -193,9 +188,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.3.1" +version = "0.3.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "499ee14d296a133d142efd215eb36bf96124829fe91cf8f5d4e5ccdd381eae00" +checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -210,9 +205,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.0" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a767e59c86900dd7c3ce3ecef04f3ace5ac9631ee150beb8b7d22f7fa3bbb2d7" +checksum = "9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425" dependencies = [ "alloy-rlp", "arbitrary", @@ -220,25 +215,31 @@ dependencies = [ "cfg-if", "const-hex", "derive_arbitrary", - "derive_more 0.99.18", + "derive_more 1.0.0", + "foldhash", "getrandom", + "hashbrown 0.15.1", "hex-literal", + "indexmap 2.6.0", "itoa", - "k256 0.13.3", + "k256 0.13.4", "keccak-asm", + "paste", "proptest", "proptest-derive", "rand", "ruint", + "rustc-hash 2.0.0", "serde", + "sha3 0.10.8", "tiny-keccak", ] [[package]] name = "alloy-rlp" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -247,13 +248,13 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" +checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -279,9 +280,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.18" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -294,49 +295,49 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "arbitrary" -version = "1.3.2" 
+version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] @@ -488,9 +489,9 @@ checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -510,7 +511,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -522,7 +523,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", "synstructure", ] @@ -534,7 +535,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -556,9 +557,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ "async-lock", "cfg-if", @@ -567,7 +568,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.35", + "rustix 0.38.41", "slab", "tracing", "windows-sys 0.59.0", @@ -586,13 +587,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +checksum = 
"721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -649,20 +650,20 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.5" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", @@ -671,7 +672,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.1", "hyper-util", "itoa", "matchit", @@ -684,7 +685,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tower", "tower-layer", @@ -694,9 +695,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -707,7 +708,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.2", "tower-layer", "tower-service", "tracing", @@ -715,17 +716,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -794,12 +795,12 @@ dependencies = [ "int_to_bytes", "itertools 0.10.5", "kzg", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", "maplit", "merkle_proof", + "metrics", "oneshot_broadcast", "operation_pool", "parking_lot 0.12.3", @@ -833,7 +834,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "5.3.0" +version = "6.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -848,7 +849,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper 1.4.1", + "hyper 1.5.1", "lighthouse_network", "monitoring_api", "node_test_rig", @@ -863,6 +864,23 @@ dependencies = [ "unused_port", ] +[[package]] +name = "beacon_node_fallback" +version = "0.1.0" +dependencies = [ + "environment", + "eth2", + "futures", + "itertools 0.10.5", + "serde", + "slog", + "slot_clock", + "strum", + "tokio", + "types", + "validator_metrics", +] + [[package]] name = "beacon_processor" version = "0.1.0" @@ -870,9 +888,9 @@ dependencies = [ "fnv", "futures", "itertools 0.10.5", - "lighthouse_metrics", "lighthouse_network", "logging", + "metrics", "num_cpus", "parking_lot 0.12.3", "serde", @@ -896,9 +914,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.69.4" +version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ "bitflags 2.6.0", "cexpr", @@ -906,12 +924,15 @@ dependencies = [ "itertools 0.12.1", "lazy_static", "lazycell", + "log", + "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.77", + "syn 
2.0.89", + "which", ] [[package]] @@ -1058,7 +1079,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.3.0" +version = "6.0.1" dependencies = [ "beacon_node", "bytes", @@ -1125,9 +1146,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" dependencies = [ "serde", ] @@ -1197,7 +1218,7 @@ dependencies = [ "semver 1.0.23", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1208,9 +1229,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.15" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" +checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" dependencies = [ "jobserver", "libc", @@ -1334,9 +1355,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -1344,9 +1365,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -1357,21 +1378,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" 
+version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" [[package]] name = "clap_utils" @@ -1398,7 +1419,6 @@ dependencies = [ "directory", "dirs", "environment", - "error-chain", "eth1", "eth2", "eth2_config", @@ -1409,8 +1429,8 @@ dependencies = [ "http_api", "http_metrics", "kzg", - "lighthouse_metrics", "lighthouse_network", + "metrics", "monitoring_api", "network", "operation_pool", @@ -1442,9 +1462,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "compare_fields" @@ -1473,9 +1493,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.12.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +checksum = "487981fa1af147182687064d0a2c336586d337a606595ced9ffb0c685c250c73" dependencies = [ "cfg-if", "cpufeatures", @@ -1529,9 +1549,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = 
"16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -1780,7 +1800,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -1828,7 +1848,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -1850,7 +1870,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -1875,9 +1895,9 @@ dependencies = [ [[package]] name = "dary_heap" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7762d17f1241643615821a8455a0b2c3e803784b058693d990b11f2dce25a0ca" +checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" [[package]] name = "data-encoding" @@ -1930,11 +1950,12 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" [[package]] name = "delay_map" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" +checksum = "df941644b671f05f59433e481ba0d31ac10e3667de725236a4c0d587c496fba1" dependencies = [ "futures", + "tokio", "tokio-util", ] @@ -2009,13 +2030,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2028,7 +2049,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2048,14 +2069,15 
@@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", + "unicode-xid", ] [[package]] name = "diesel" -version = "2.2.3" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e13bab2796f412722112327f3e575601a3e9cdcbe426f0d30dbf43f3f5dc71" +checksum = "cbf9649c05e0a9dbd6d0b0b8301db5182b972d0fd02f0a7c6736cf632d7c0fd5" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -2075,7 +2097,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2095,7 +2117,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2171,9 +2193,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f569b8c367554666c8652305621e8bae3634a2ff5c6378081d5bd8c399c99f23" +checksum = "898d136ecb64116ec68aecf14d889bd30f8b1fe0c19e262953f7388dbe77052e" dependencies = [ "aes 0.8.4", "aes-gcm", @@ -2184,7 +2206,7 @@ dependencies = [ "enr", "fnv", "futures", - "hashlink 0.8.4", + "hashlink 0.9.1", "hex", "hkdf", "lazy_static", @@ -2192,13 +2214,13 @@ dependencies = [ "lru", "more-asserts", "multiaddr", - "parking_lot 0.11.2", + "parking_lot 0.12.3", "rand", "smallvec", - "socket2 0.4.10", + "socket2", "tokio", "tracing", - "uint", + "uint 0.10.0", "zeroize", ] @@ -2210,7 +2232,24 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", +] + +[[package]] +name = "doppelganger_service" +version = "0.1.0" +dependencies = [ + "beacon_node_fallback", + "environment", + "eth2", + "futures", + "logging", + "parking_lot 0.12.3", + "slog", + 
"slot_clock", + "task_executor", + "tokio", + "types", ] [[package]] @@ -2224,7 +2263,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2369,25 +2408,25 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] [[package]] name = "enr" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "972070166c68827e64bd1ebc8159dd8e32d9bc2da7ebe8f20b61308f7974ad30" +checksum = "851bd664a3d3a3c175cff92b2f0df02df3c541b4895d0ae307611827aae46152" dependencies = [ "alloy-rlp", - "base64 0.21.7", + "base64 0.22.1", "bytes", "ed25519-dalek", "hex", - "k256 0.13.3", + "k256 0.13.4", "log", "rand", "serde", @@ -2397,14 +2436,14 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2476,16 +2515,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "backtrace", - "version_check", -] - [[package]] name = "eth1" version = "0.2.0" @@ -2497,9 +2526,9 @@ dependencies = [ "ethereum_ssz_derive", "execution_layer", "futures", - "lighthouse_metrics", "logging", "merkle_proof", + "metrics", "parking_lot 
0.12.3", "sensitive_url", "serde", @@ -2533,8 +2562,6 @@ dependencies = [ name = "eth2" version = "0.1.0" dependencies = [ - "account_utils", - "bytes", "derivative", "eth2_keystore", "ethereum_serde_utils", @@ -2542,7 +2569,6 @@ dependencies = [ "ethereum_ssz_derive", "futures", "futures-util", - "libsecp256k1", "lighthouse_network", "mediatype", "pretty_reqwest_error", @@ -2550,7 +2576,7 @@ dependencies = [ "proto_array", "psutil", "reqwest", - "ring 0.16.20", + "reqwest-eventsource", "sensitive_url", "serde", "serde_json", @@ -2559,6 +2585,7 @@ dependencies = [ "store", "tokio", "types", + "zeroize", ] [[package]] @@ -2675,8 +2702,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "thiserror", - "uint", + "thiserror 1.0.69", + "uint 0.9.5", ] [[package]] @@ -2692,8 +2719,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.10.8", - "thiserror", - "uint", + "thiserror 1.0.69", + "uint 0.9.5", ] [[package]] @@ -2735,7 +2762,7 @@ dependencies = [ "impl-rlp", "impl-serde 0.3.2", "primitive-types 0.10.1", - "uint", + "uint 0.9.5", ] [[package]] @@ -2751,7 +2778,7 @@ dependencies = [ "impl-serde 0.4.0", "primitive-types 0.12.2", "scale-info", - "uint", + "uint 0.9.5", ] [[package]] @@ -2798,7 +2825,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2817,7 +2844,7 @@ dependencies = [ "pin-project", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2884,7 +2911,7 @@ dependencies = [ "serde_json", "strum", "syn 1.0.109", - "thiserror", + "thiserror 1.0.69", "tiny-keccak", "unicode-xid", ] @@ -2912,7 +2939,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", "tracing-futures", @@ -2951,6 +2978,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "eventsource-stream" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"74fef4569247a5f429d9156b9d0a2599914385dd189c539334c625d8099d90ab" +dependencies = [ + "futures-core", + "nom", + "pin-project-lite", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" @@ -2997,10 +3035,10 @@ dependencies = [ "jsonwebtoken", "keccak-hash", "kzg", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", + "metrics", "parking_lot 0.12.3", "pretty_reqwest_error", "rand", @@ -3051,9 +3089,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fastrlp" @@ -3073,7 +3111,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3161,13 +3199,13 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -3176,6 +3214,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -3198,7 +3242,7 @@ dependencies = [ "beacon_chain", 
"ethereum_ssz", "ethereum_ssz_derive", - "lighthouse_metrics", + "metrics", "proto_array", "slog", "state_processing", @@ -3240,9 +3284,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -3265,9 +3309,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -3275,15 +3319,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -3293,15 +3337,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = 
"futures-lite" -version = "2.3.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ "futures-core", "pin-project-lite", @@ -3309,13 +3353,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -3325,44 +3369,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.12", + "rustls 0.23.18", "rustls-pki-types", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" - -[[package]] -name = "futures-ticker" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" -dependencies = [ - "futures", - "futures-timer", - "instant", -] +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" +dependencies = [ + "gloo-timers", + "send_wrapper 0.4.0", +] [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -3442,9 +3479,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git-version" @@ -3463,7 +3500,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -3472,6 +3509,18 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "gossipsub" version = "0.5.0" @@ -3484,7 +3533,6 @@ dependencies = [ "either", "fnv", "futures", - "futures-ticker", "futures-timer", "getrandom", "hashlink 0.9.1", @@ -3503,6 +3551,18 @@ dependencies = [ "web-time", ] +[[package]] +name = "graffiti_file" +version = "0.1.0" +dependencies = [ + "bls", + "hex", + "serde", + "slog", + "tempfile", + "types", +] + [[package]] name = "group" version = "0.12.1" @@ -3539,7 +3599,7 @@ dependencies = [ "futures-sink", 
"futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3587,6 +3647,18 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", + "serde", +] + [[package]] name = "hashers" version = "1.0.1" @@ -3709,8 +3781,8 @@ dependencies = [ "ipnet", "once_cell", "rand", - "socket2 0.5.7", - "thiserror", + "socket2", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -3733,7 +3805,7 @@ dependencies = [ "rand", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -3787,6 +3859,15 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "hostname" version = "0.3.1" @@ -3871,11 +3952,11 @@ dependencies = [ "futures", "genesis", "hex", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "lru", + "metrics", "network", "operation_pool", "parking_lot 0.12.3", @@ -3905,11 +3986,11 @@ name = "http_metrics" version = "0.1.0" dependencies = [ "beacon_chain", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "malloc_utils", + "metrics", "reqwest", "serde", "slog", @@ -3923,9 +4004,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -3941,9 +4022,9 @@ checksum = 
"9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", @@ -3956,7 +4037,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -3965,9 +4046,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", @@ -3990,7 +4071,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -4003,7 +4084,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.30", + "hyper 0.14.31", "native-tls", "tokio", "tokio-native-tls", @@ -4011,24 +4092,25 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.1", "pin-project-lite", "tokio", + "tower-service", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = 
"0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -4047,6 +4129,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -4065,12 +4265,23 @@ dependencies = [ [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + 
"icu_normalizer", + "icu_properties", ] [[package]] @@ -4085,9 +4296,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" +checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" dependencies = [ "async-io", "core-foundation", @@ -4096,8 +4307,12 @@ dependencies = [ "if-addrs", "ipnet", "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-proto", + "netlink-sys", "rtnetlink", - "system-configuration", + "system-configuration 0.6.1", "tokio", "windows", ] @@ -4113,7 +4328,7 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "log", "rand", "tokio", @@ -4136,7 +4351,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.6.12", + "parity-scale-codec 3.7.0", ] [[package]] @@ -4168,13 +4383,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] @@ -4195,12 +4410,40 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ + "arbitrary", "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.1", + "serde", +] + +[[package]] +name = 
"initialized_validators" +version = "0.1.0" +dependencies = [ + "account_utils", + "bincode", + "bls", + "eth2_keystore", + "filesystem", + "lockfile", + "metrics", + "parking_lot 0.12.3", + "rand", + "reqwest", + "serde", + "serde_json", + "signing_method", + "slog", + "tokio", + "types", + "url", + "validator_dir", + "validator_metrics", + "zeroize", ] [[package]] @@ -4259,7 +4502,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring 1.1.0", "windows-sys 0.48.0", "winreg", @@ -4267,9 +4510,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" @@ -4317,9 +4560,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" [[package]] name = "jobserver" @@ -4332,9 +4575,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -4369,9 +4612,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = 
"f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa 0.16.9", @@ -4392,9 +4635,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4446,7 +4689,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "5.3.0" +version = "6.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -4505,9 +4748,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.158" +version = "0.2.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "libflate" @@ -4545,9 +4788,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libmdbx" @@ -4561,7 +4804,7 @@ dependencies = [ "libc", "mdbx-sys", "parking_lot 0.12.3", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4593,7 +4836,7 @@ dependencies = [ "multiaddr", "pin-project", "rw-stream-sink", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4641,7 +4884,7 @@ dependencies = [ "rand", "rw-stream-sink", "smallvec", - "thiserror", + "thiserror 1.0.69", "tracing", "unsigned-varint 0.8.0", "void", @@ -4682,16 +4925,16 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror", + "thiserror 1.0.69", "tracing", "void", ] 
[[package]] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" dependencies = [ "asn1_der", "bs58 0.5.1", @@ -4704,9 +4947,8 @@ dependencies = [ "rand", "sec1 0.7.3", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", "tracing", - "void", "zeroize", ] @@ -4725,7 +4967,7 @@ dependencies = [ "libp2p-swarm", "rand", "smallvec", - "socket2 0.5.7", + "socket2", "tokio", "tracing", "void", @@ -4786,7 +5028,7 @@ dependencies = [ "sha2 0.10.8", "snow", "static_assertions", - "thiserror", + "thiserror 1.0.69", "tracing", "x25519-dalek", "zeroize", @@ -4825,9 +5067,9 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.12", - "socket2 0.5.7", - "thiserror", + "rustls 0.23.18", + "socket2", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -4865,7 +5107,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -4880,7 +5122,7 @@ dependencies = [ "libc", "libp2p-core", "libp2p-identity", - "socket2 0.5.7", + "socket2", "tokio", "tracing", ] @@ -4897,9 +5139,9 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.18", "rustls-webpki 0.101.7", - "thiserror", + "thiserror 1.0.69", "x509-parser", "yasna", ] @@ -4929,10 +5171,10 @@ dependencies = [ "either", "futures", "libp2p-core", - "thiserror", + "thiserror 1.0.69", "tracing", "yamux 0.12.1", - "yamux 0.13.3", + "yamux 0.13.4", ] [[package]] @@ -5017,11 +5259,12 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.3.0" +version = "6.0.1" dependencies = [ "account_manager", "account_utils", "beacon_node", + "beacon_node_fallback", "beacon_processor", "bls", "boot_node", @@ -5035,11 +5278,12 @@ dependencies = [ "eth2_network_config", "ethereum_hashing", "futures", - 
"lighthouse_metrics", + "initialized_validators", "lighthouse_network", "lighthouse_version", "logging", "malloc_utils", + "metrics", "sensitive_url", "serde", "serde_json", @@ -5054,13 +5298,7 @@ dependencies = [ "validator_client", "validator_dir", "validator_manager", -] - -[[package]] -name = "lighthouse_metrics" -version = "0.2.0" -dependencies = [ - "prometheus", + "zeroize", ] [[package]] @@ -5076,7 +5314,6 @@ dependencies = [ "dirs", "discv5", "either", - "error-chain", "ethereum_ssz", "ethereum_ssz_derive", "fnv", @@ -5086,11 +5323,11 @@ dependencies = [ "itertools 0.10.5", "libp2p", "libp2p-mplex", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", "lru_cache", + "metrics", "parking_lot 0.12.3", "prometheus-client", "quickcheck", @@ -5146,6 +5383,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + [[package]] name = "lmdb-rkv" version = "0.14.0" @@ -5196,7 +5439,7 @@ name = "logging" version = "0.2.0" dependencies = [ "chrono", - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "serde", "serde_json", @@ -5214,11 +5457,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.1", ] [[package]] @@ -5252,7 +5495,7 @@ name = "malloc_utils" version = "0.1.0" dependencies = [ "libc", - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "tikv-jemalloc-ctl", "tikv-jemallocator", @@ -5347,18 +5590,18 @@ dependencies = [ 
[[package]] name = "metastruct" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00a5ba4a0f3453c31c397b214e1675d95b697c33763aa58add57ea833424384" +checksum = "d74f54f231f9a18d77393ecc5cc7ab96709b2a61ee326c2b2b291009b0cc5a07" dependencies = [ "metastruct_macro", ] [[package]] name = "metastruct_macro" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c3a991d4536c933306e52f0e8ab303757185ec13a09d1f3e1cbde5a0d8410bf" +checksum = "985e7225f3a4dfbec47a0c6a730a874185fda840d365d7bbd6ba199dd81796d5" dependencies = [ "darling 0.13.4", "itertools 0.10.5", @@ -5368,6 +5611,13 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "metrics" +version = "0.2.0" +dependencies = [ + "prometheus", +] + [[package]] name = "migrations_internals" version = "2.2.0" @@ -5434,15 +5684,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -5475,8 +5716,8 @@ name = "monitoring_api" version = "0.1.0" dependencies = [ "eth2", - "lighthouse_metrics", "lighthouse_version", + "metrics", "regex", "reqwest", "sensitive_url", @@ -5496,9 +5737,9 @@ checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" [[package]] name = "multiaddr" -version = "0.18.1" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" dependencies = [ "arrayref", "byteorder", @@ -5509,7 +5750,7 
@@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", "url", ] @@ -5526,12 +5767,12 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" +checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" dependencies = [ "core2", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", ] [[package]] @@ -5567,21 +5808,20 @@ dependencies = [ [[package]] name = "netlink-packet-core" -version = "0.4.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345b8ab5bd4e71a2986663e88c56856699d060e78e152e6e9d7966fcd5491297" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" dependencies = [ "anyhow", "byteorder", - "libc", "netlink-packet-utils", ] [[package]] name = "netlink-packet-route" -version = "0.12.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -5600,21 +5840,21 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "netlink-proto" -version = "0.10.0" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" +checksum = "86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -5644,7 +5884,6 @@ dependencies = [ "bls", "delay_map", "derivative", - "error-chain", "eth2", "eth2_network_config", 
"ethereum_ssz", @@ -5657,11 +5896,11 @@ dependencies = [ "igd-next", "itertools 0.10.5", "kzg", - "lighthouse_metrics", "lighthouse_network", "logging", "lru_cache", "matches", + "metrics", "operation_pool", "parking_lot 0.12.3", "rand", @@ -5692,6 +5931,17 @@ dependencies = [ "libc", ] +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + [[package]] name = "nix" version = "0.29.0" @@ -5709,6 +5959,7 @@ name = "node_test_rig" version = "0.2.0" dependencies = [ "beacon_node", + "beacon_node_fallback", "environment", "eth2", "execution_layer", @@ -5831,9 +6082,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] @@ -5849,9 +6100,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oneshot_broadcast" @@ -5899,9 +6150,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -5920,7 +6171,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -5931,18 +6182,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.3.1+3.3.1" +version = "300.4.1+3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91" +checksum = "faa4eac4138c62414b5622d1b31c5c304f34b406b013c079c2bbc652fdd6678c" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", @@ -5961,8 +6212,8 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.10.5", - "lighthouse_metrics", "maplit", + "metrics", "parking_lot 0.12.3", "rand", "rayon", @@ -6016,15 +6267,16 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.6.12", + "parity-scale-codec-derive 3.7.0", + "rustversion", "serde", ] @@ -6042,21 +6294,21 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] 
[[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -6101,7 +6353,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.7", "smallvec", "windows-targets 0.52.6", ] @@ -6171,12 +6423,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.11" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.69", "ucd-trie", ] @@ -6210,29 +6462,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" 
+checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -6262,9 +6514,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "platforms" @@ -6274,9 +6526,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -6287,30 +6539,30 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] [[package]] name = "polling" -version = "3.7.3" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.35", + "rustix 
0.38.41", "tracing", "windows-sys 0.59.0", ] @@ -6358,9 +6610,9 @@ dependencies = [ [[package]] name = "postgres-types" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02048d9e032fb3cc3413bbf7b83a15d84a5d419778e2628751896d856498eee9" +checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" dependencies = [ "bytes", "fallible-iterator", @@ -6384,9 +6636,9 @@ dependencies = [ [[package]] name = "pq-sys" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a24ff9e4cf6945c988f0db7005d87747bf72864965c3529d259ad155ac41d584" +checksum = "f6cc05d7ea95200187117196eee9edd0644424911821aeb28a18ce60ea0b8793" dependencies = [ "vcpkg", ] @@ -6399,6 +6651,16 @@ dependencies = [ "sensitive_url", ] +[[package]] +name = "prettyplease" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +dependencies = [ + "proc-macro2", + "syn 2.0.89", +] + [[package]] name = "primeorder" version = "0.13.6" @@ -6418,7 +6680,7 @@ dependencies = [ "impl-codec 0.5.1", "impl-rlp", "impl-serde 0.3.2", - "uint", + "uint 0.9.5", ] [[package]] @@ -6432,7 +6694,7 @@ dependencies = [ "impl-rlp", "impl-serde 0.4.0", "scale-info", - "uint", + "uint 0.9.5", ] [[package]] @@ -6451,14 +6713,14 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.20", + "toml_edit 0.22.22", ] [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -6490,7 
+6752,7 @@ dependencies = [ "memchr", "parking_lot 0.12.3", "protobuf", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6513,7 +6775,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -6530,7 +6792,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -6544,7 +6806,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -6581,7 +6843,7 @@ dependencies = [ "num_cpus", "once_cell", "platforms", - "thiserror", + "thiserror 1.0.69", "unescape", ] @@ -6608,7 +6870,7 @@ dependencies = [ "asynchronous-codec", "bytes", "quick-protobuf", - "thiserror", + "thiserror 1.0.69", "unsigned-varint 0.8.0", ] @@ -6636,9 +6898,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.4" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2d2fb862b7ba45e615c1429def928f2e15f815bdf933b27a2d3824e224c1f46" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "futures-io", @@ -6646,41 +6908,45 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", - "socket2 0.5.7", - "thiserror", + "rustls 0.23.18", + "socket2", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.7" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0a9b3a42929fad8a7c3de7f86ce0814cfa893328157672680e9fb1145549c5" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom", "rand", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.18", + "rustls-pki-types", "slab", - "thiserror", + 
"thiserror 2.0.3", "tinyvec", "tracing", + "web-time", ] [[package]] name = "quinn-udp" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6734,6 +7000,7 @@ dependencies = [ "libc", "rand_chacha", "rand_core", + "serde", ] [[package]] @@ -6798,9 +7065,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.4" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "074373f3e7e5d27d8741d19512232adb47be8622d3daef3a45bcae72050c3d2a" +checksum = "84b1de48a7cf7ba193e81e078d17ee2b786236eed1d3f7c60f8a09545efc4925" dependencies = [ "libc", ] @@ -6816,18 +7083,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -6840,19 +7098,19 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = 
"b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -6866,13 +7124,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -6883,9 +7141,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -6901,7 +7159,7 @@ dependencies = [ "h2", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.30", + "hyper 0.14.31", "hyper-rustls", "hyper-tls", "ipnet", @@ -6918,7 +7176,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 0.1.2", - "system-configuration", + "system-configuration 0.5.1", "tokio", "tokio-native-tls", "tokio-rustls 0.24.1", @@ -6933,6 +7191,22 @@ dependencies = [ "winreg", ] +[[package]] +name = "reqwest-eventsource" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f529a5ff327743addc322af460761dff5b50e0c826b9e6ac44c3195c50bb2026" +dependencies = [ + "eventsource-stream", + "futures-core", + "futures-timer", + "mime", + "nom", + "pin-project-lite", + "reqwest", + "thiserror 1.0.69", +] + [[package]] name = "resolv-conf" version = "0.7.0" @@ -7042,16 +7316,19 @@ dependencies = [ [[package]] name = "rtnetlink" 
-version = "0.10.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" +checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" dependencies = [ "futures", "log", + "netlink-packet-core", "netlink-packet-route", + "netlink-packet-utils", "netlink-proto", - "nix 0.24.3", - "thiserror", + "netlink-sys", + "nix 0.26.4", + "thiserror 1.0.69", "tokio", ] @@ -7069,7 +7346,7 @@ dependencies = [ "fastrlp", "num-bigint", "num-traits", - "parity-scale-codec 3.6.12", + "parity-scale-codec 3.7.0", "primitive-types 0.12.2", "proptest", "rand", @@ -7181,9 +7458,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.35" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -7213,21 +7490,21 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -7243,19 +7520,21 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - 
"base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-webpki" @@ -7269,9 +7548,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -7280,9 +7559,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "rusty-fork" @@ -7337,35 +7616,35 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.3" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" +checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "cfg-if", - "derive_more 0.99.18", - "parity-scale-codec 3.6.12", + "derive_more 1.0.0", + "parity-scale-codec 3.7.0", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.11.3" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" +checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ "proc-macro-crate 
3.2.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7454,9 +7733,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -7482,13 +7761,19 @@ dependencies = [ [[package]] name = "semver-parser" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" + [[package]] name = "send_wrapper" version = "0.6.0" @@ -7505,9 +7790,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.209" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -7524,20 +7809,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.215" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -7563,14 +7848,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -7615,7 +7900,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -7681,9 +7966,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", "cfg-if", @@ -7733,6 +8018,22 @@ dependencies = [ "rand_core", ] +[[package]] +name = "signing_method" +version = "0.1.0" +dependencies = [ + "eth2_keystore", + "ethereum_serde_utils", + "lockfile", + "parking_lot 0.12.3", + "reqwest", + "serde", + 
"task_executor", + "types", + "url", + "validator_metrics", +] + [[package]] name = "simple_asn1" version = "0.6.2" @@ -7741,7 +8042,7 @@ checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -7791,12 +8092,12 @@ dependencies = [ "filesystem", "flate2", "libmdbx", - "lighthouse_metrics", "lmdb-rkv", "lmdb-rkv-sys", "logging", "lru", "maplit", + "metrics", "parking_lot 0.12.3", "rand", "rayon", @@ -7952,7 +8253,7 @@ dependencies = [ name = "slot_clock" version = "0.2.0" dependencies = [ - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "types", ] @@ -7989,16 +8290,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -8080,8 +8371,8 @@ dependencies = [ "int_to_bytes", "integer-sqrt", "itertools 0.10.5", - "lighthouse_metrics", "merkle_proof", + "metrics", "rand", "rayon", "safe_arith", @@ -8115,23 +8406,31 @@ name = "store" version = "0.2.0" dependencies = [ "beacon_chain", + "bls", + "criterion", "db-key", "directory", "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.10.5", "leveldb", - "lighthouse_metrics", + "logging", "lru", + "metrics", "parking_lot 0.12.3", + "rand", "safe_arith", "serde", "slog", "sloggers", + "smallvec", "state_processing", "strum", + "superstruct", "tempfile", "types", + "xdelta3", + "zstd 0.13.2", ] [[package]] @@ -8222,9 +8521,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.77" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies 
= [ "proc-macro2", "quote", @@ -8239,9 +8538,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" [[package]] name = "synstructure" @@ -8251,7 +8550,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -8277,7 +8576,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -8290,6 +8600,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "system_health" version = "0.1.0" @@ -8332,23 +8652,24 @@ version = "0.1.0" dependencies = [ "async-channel", "futures", - "lighthouse_metrics", "logging", + "metrics", "slog", "sloggers", "tokio", + "tracing", ] [[package]] name = "tempfile" -version = "3.12.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = 
"28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", "fastrand", "once_cell", - "rustix 0.38.35", + "rustix 0.38.41", "windows-sys 0.59.0", ] @@ -8374,12 +8695,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ - "rustix 0.38.35", - "windows-sys 0.48.0", + "rustix 0.38.41", + "windows-sys 0.59.0", ] [[package]] @@ -8417,22 +8738,42 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", ] [[package]] @@ -8540,7 +8881,7 @@ dependencies = [ "rand", "rustc-hash 
1.1.0", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", "unicode-normalization", "wasm-bindgen", "zeroize", @@ -8555,6 +8896,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -8582,9 +8933,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -8592,7 +8943,7 @@ dependencies = [ "mio", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "windows-sys 0.52.0", ] @@ -8615,7 +8966,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -8630,9 +8981,9 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03adcf0147e203b6032c0b2d30be1415ba03bc348901f3ff1cc0df6a733e60c3" +checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" dependencies = [ "async-trait", "byteorder", @@ -8648,7 +8999,7 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand", - "socket2 0.5.7", + "socket2", "tokio", "tokio-util", "whoami", @@ -8677,9 +9028,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -8689,9 +9040,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -8720,7 +9071,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit 0.22.22", ] [[package]] @@ -8738,34 +9089,34 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.18", + "winnow 0.6.20", ] [[package]] name = "tower" -version = "0.4.13" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" dependencies = [ "futures-core", "futures-util", - "pin-project", "pin-project-lite", + "sync_wrapper 0.1.2", "tokio", "tower-layer", "tower-service", @@ -8803,7 +9154,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", - "thiserror", + "thiserror 1.0.69", "time", "tracing-subscriber", ] @@ -8816,7 +9167,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -8907,7 +9258,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -8922,9 +9273,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6631e42e10b40c0690bf92f404ebcfe6e1fdb480391d15f17cc8e96eeed5369" +checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" dependencies = [ "serde", "stable_deref_trait", @@ -8997,9 +9348,9 @@ dependencies = [ [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uint" @@ -9013,6 +9364,18 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" @@ -9027,45 +9390,42 @@ checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = 
"7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-properties" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -9122,15 +9482,27 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 
0.5.0", + "idna 1.0.3", "percent-encoding", ] +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -9152,54 +9524,34 @@ name = "validator_client" version = "0.3.5" dependencies = [ "account_utils", - "bincode", - "bls", + "beacon_node_fallback", "clap", "clap_utils", - "deposit_contract", "directory", "dirs", + "doppelganger_service", "environment", "eth2", - "eth2_keystore", - "ethereum_serde_utils", "fdlimit", - "filesystem", - "futures", - "hex", - "hyper 1.4.1", - "itertools 0.10.5", - "libsecp256k1", - "lighthouse_metrics", - "lighthouse_version", - "lockfile", - "logging", - "malloc_utils", + "graffiti_file", + "hyper 1.5.1", + "initialized_validators", + "metrics", "monitoring_api", "parking_lot 0.12.3", - "rand", "reqwest", - "ring 0.16.20", - "safe_arith", "sensitive_url", "serde", - "serde_json", "slashing_protection", "slog", "slot_clock", - "strum", - "sysinfo", - "system_health", - "task_executor", - "tempfile", "tokio", - "tokio-stream", - "tree_hash", "types", - "url", - "validator_dir", - "warp", - "warp_utils", + "validator_http_api", + "validator_http_metrics", + "validator_metrics", + "validator_services", + "validator_store", ] [[package]] @@ -9220,6 +9572,70 @@ dependencies = [ "types", ] +[[package]] +name = "validator_http_api" +version = "0.1.0" +dependencies = [ + "account_utils", + "beacon_node_fallback", + "bls", + "deposit_contract", + "directory", + "dirs", + "doppelganger_service", + "eth2", + "eth2_keystore", + "ethereum_serde_utils", + "filesystem", + "futures", + "graffiti_file", + "initialized_validators", + "itertools 0.10.5", + 
"lighthouse_version", + "logging", + "parking_lot 0.12.3", + "rand", + "sensitive_url", + "serde", + "signing_method", + "slashing_protection", + "slog", + "slot_clock", + "sysinfo", + "system_health", + "task_executor", + "tempfile", + "tokio", + "tokio-stream", + "types", + "url", + "validator_dir", + "validator_services", + "validator_store", + "warp", + "warp_utils", + "zeroize", +] + +[[package]] +name = "validator_http_metrics" +version = "0.1.0" +dependencies = [ + "lighthouse_version", + "malloc_utils", + "metrics", + "parking_lot 0.12.3", + "serde", + "slog", + "slot_clock", + "types", + "validator_metrics", + "validator_services", + "validator_store", + "warp", + "warp_utils", +] + [[package]] name = "validator_manager" version = "0.1.0" @@ -9227,6 +9643,7 @@ dependencies = [ "account_utils", "clap", "clap_utils", + "derivative", "environment", "eth2", "eth2_network_config", @@ -9240,7 +9657,55 @@ dependencies = [ "tokio", "tree_hash", "types", - "validator_client", + "validator_http_api", + "zeroize", +] + +[[package]] +name = "validator_metrics" +version = "0.1.0" +dependencies = [ + "metrics", +] + +[[package]] +name = "validator_services" +version = "0.1.0" +dependencies = [ + "beacon_node_fallback", + "bls", + "doppelganger_service", + "environment", + "eth2", + "futures", + "graffiti_file", + "parking_lot 0.12.3", + "safe_arith", + "slog", + "slot_clock", + "tokio", + "tree_hash", + "types", + "validator_metrics", + "validator_store", +] + +[[package]] +name = "validator_store" +version = "0.1.0" +dependencies = [ + "account_utils", + "doppelganger_service", + "initialized_validators", + "parking_lot 0.12.3", + "serde", + "signing_method", + "slashing_protection", + "slog", + "slot_clock", + "task_executor", + "types", + "validator_metrics", ] [[package]] @@ -9312,13 +9777,13 @@ dependencies = [ "futures-util", "headers", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "log", "mime", "mime_guess", "percent-encoding", "pin-project", - 
"rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "scoped-tls", "serde", "serde_json", @@ -9338,7 +9803,7 @@ dependencies = [ "bytes", "eth2", "headers", - "lighthouse_metrics", + "metrics", "safe_arith", "serde", "serde_array_query", @@ -9363,9 +9828,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", @@ -9374,24 +9839,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", @@ -9401,9 +9866,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9411,28 +9876,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" 
+version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -9471,7 +9936,7 @@ dependencies = [ "env_logger 0.9.3", "eth2", "http_api", - "hyper 1.4.1", + "hyper 1.5.1", "log", "logging", "network", @@ -9492,9 +9957,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", @@ -9520,19 +9985,21 @@ dependencies = [ "eth2_keystore", "eth2_network_config", "futures", + "initialized_validators", "logging", "parking_lot 0.12.3", "reqwest", "serde", "serde_json", "serde_yaml", + "slashing_protection", "slot_clock", "task_executor", "tempfile", "tokio", "types", "url", - "validator_client", + "validator_store", "zip", ] @@ -9543,12 +10010,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] -name = "whoami" -version = "1.5.1" +name = "which" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ - "redox_syscall 0.4.1", + "either", + "home", + "once_cell", + "rustix 0.38.41", +] + +[[package]] +name = "whoami" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" +dependencies = [ + "redox_syscall 0.5.7", "wasite", "web-sys", ] @@ -9598,12 +10077,12 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.51.1" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" +checksum = "efc5cf48f83140dcaab716eeaea345f9e93d0018fb81162753a3f76c3397b538" dependencies = [ - "windows-core 0.51.1", - "windows-targets 0.48.5", + "windows-core 0.53.0", + "windows-targets 0.52.6", ] [[package]] @@ -9620,18 +10099,28 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "windows-core" -version = "0.52.0" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" +dependencies = [ + 
"windows-result", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" dependencies = [ "windows-targets 0.52.6", ] @@ -9861,9 +10350,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -9878,6 +10367,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -9890,8 +10391,8 @@ dependencies = [ "log", "pharos", "rustc_version 0.4.1", - "send_wrapper", - "thiserror", + "send_wrapper 0.6.0", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -9937,15 +10438,29 @@ dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror", + "thiserror 1.0.69", "time", ] +[[package]] +name = "xdelta3" +version = "0.1.5" +source = "git+http://github.com/sigp/xdelta3-rs?rev=50d63cdf1878e5cf3538e9aae5eed34a22c64e4a#50d63cdf1878e5cf3538e9aae5eed34a22c64e4a" +dependencies = [ + "bindgen", + "cc", + "futures-io", + "futures-util", + "libc", + "log", + "rand", +] + [[package]] name = "xml-rs" -version = "0.8.21" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" [[package]] name = "xmltree" @@ -9984,9 +10499,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31b5e376a8b012bee9c423acdbb835fc34d45001cfa3106236a624e4b738028" +checksum = "17610762a1207ee816c6fadc29220904753648aba0a9ed61c7b8336e80a559c4" dependencies = [ "futures", "log", @@ -10007,6 +10522,30 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -10025,7 +10564,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure", ] [[package]] @@ -10034,6 +10594,7 @@ version = "1.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ + "serde", "zeroize_derive", ] @@ -10045,7 +10606,29 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", ] [[package]] @@ -10065,7 +10648,7 @@ dependencies = [ "pbkdf2 0.11.0", "sha1", "time", - "zstd", + "zstd 0.11.2+zstd.1.5.2", ] [[package]] @@ -10074,7 +10657,16 @@ version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ - "zstd-safe", + "zstd-safe 5.0.2+zstd.1.5.2", +] + +[[package]] +name = "zstd" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +dependencies = [ + "zstd-safe 7.2.1", ] [[package]] @@ -10087,6 +10679,15 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "zstd-safe" +version = "7.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.13+zstd.1.5.6" diff --git a/Cargo.toml b/Cargo.toml index 94ac8e13ff..23e52a306b 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -8,11 +8,11 @@ members = [ "beacon_node/builder_client", "beacon_node/client", "beacon_node/eth1", - "beacon_node/lighthouse_network", - "beacon_node/lighthouse_network/gossipsub", "beacon_node/execution_layer", "beacon_node/http_api", "beacon_node/http_metrics", + "beacon_node/lighthouse_network", + "beacon_node/lighthouse_network/gossipsub", "beacon_node/network", "beacon_node/store", "beacon_node/timer", @@ -30,40 +30,40 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", - "common/lighthouse_metrics", "common/lighthouse_version", "common/lockfile", "common/logging", "common/lru_cache", "common/malloc_utils", + "common/metrics", + "common/monitoring_api", "common/oneshot_broadcast", "common/pretty_reqwest_error", "common/sensitive_url", "common/slot_clock", "common/system_health", - "common/task_executor", "common/target_check", + "common/task_executor", "common/test_random_derive", "common/unused_port", "common/validator_dir", "common/warp_utils", - "common/monitoring_api", - - "database_manager", - - "consensus/int_to_bytes", "consensus/fixed_bytes", "consensus/fork_choice", + + "consensus/int_to_bytes", "consensus/proto_array", "consensus/safe_arith", "consensus/state_processing", "consensus/swap_or_not_shuffle", "crypto/bls", - "crypto/kzg", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", + "crypto/kzg", + + "database_manager", "lcli", @@ -78,12 +78,22 @@ members = [ "testing/execution_engine_integration", "testing/node_test_rig", "testing/simulator", - "testing/test-test_logger", "testing/state_transition_vectors", + "testing/test-test_logger", "testing/web3signer_tests", "validator_client", + "validator_client/beacon_node_fallback", + "validator_client/doppelganger_service", + "validator_client/graffiti_file", + "validator_client/http_api", + "validator_client/http_metrics", + "validator_client/initialized_validators", + "validator_client/signing_method", 
"validator_client/slashing_protection", + "validator_client/validator_metrics", + "validator_client/validator_services", + "validator_client/validator_store", "validator_manager", @@ -101,6 +111,7 @@ alloy-consensus = "0.3.0" anyhow = "1" arbitrary = { version = "1", features = ["derive"] } async-channel = "1.9.0" +axum = "0.7.7" bincode = "1" bitvec = "1" byteorder = "1" @@ -111,14 +122,20 @@ clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } c-kzg = { version = "1", default-features = false } compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.5" -delay_map = "0.3" +delay_map = "0.4" derivative = "2" dirs = "3" either = "1.9" -rust_eth_kzg = "0.5.1" -discv5 = { version = "0.7", features = ["libp2p"] } +# TODO: rust_eth_kzg is pinned for now while a perf regression is investigated +# The crate_crypto_* dependencies can be removed from this file completely once we update +rust_eth_kzg = "=0.5.1" +crate_crypto_internal_eth_kzg_bls12_381 = "=0.5.1" +crate_crypto_internal_eth_kzg_erasure_codes = "=0.5.1" +crate_crypto_internal_eth_kzg_maybe_rayon = "=0.5.1" +crate_crypto_internal_eth_kzg_polynomial = "=0.5.1" +crate_crypto_kzg_multi_open_fk20 = "=0.5.1" +discv5 = { version = "0.9", features = ["libp2p"] } env_logger = "0.9" -error-chain = "0.12" ethereum_hashing = "0.7.0" ethereum_serde_utils = "0.7" ethereum_ssz = "0.7" @@ -129,6 +146,7 @@ exit-future = "0.2" fnv = "1" fs2 = "0.4" futures = "0.3" +graffiti_file = { path = "validator_client/graffiti_file" } hex = "0.4" hashlink = "0.9.0" hyper = "1" @@ -141,6 +159,7 @@ milhouse = "0.3" num_cpus = "1" parking_lot = "0.12" paste = "1" +prometheus = "0.13" quickcheck = "1" quickcheck_macros = "1" quote = "1" @@ -148,7 +167,13 @@ r2d2 = "0.8" rand = "0.8" rayon = "1.7" regex = "1" -reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } +reqwest = { version = "0.11", default-features = 
false, features = [ + "blocking", + "json", + "stream", + "rustls-tls", + "native-tls-vendored", +] } ring = "0.16" rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } @@ -157,7 +182,11 @@ serde_json = "1" serde_repr = "0.1" serde_yaml = "0.9" sha2 = "0.9" -slog = { version = "2", features = ["max_level_debug", "release_max_level_debug", "nested-values"] } +slog = { version = "2", features = [ + "max_level_debug", + "release_max_level_debug", + "nested-values", +] } slog-async = "2" slog-term = "2" sloggers = { version = "2", features = ["json"] } @@ -169,7 +198,12 @@ superstruct = "0.8" syn = "1" sysinfo = "0.26" tempfile = "3" -tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] } +tokio = { version = "1", features = [ + "rt-multi-thread", + "sync", + "signal", + "macros", +] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.7", features = ["codec", "compat", "time"] } tracing = "0.1.40" @@ -182,19 +216,22 @@ tree_hash_derive = "0.8" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } warp = { version = "0.3.7", default-features = false, features = ["tls"] } -zeroize = { version = "1", features = ["zeroize_derive"] } +zeroize = { version = "1", features = ["zeroize_derive", "serde"] } zip = "0.6" # Local crates. 
account_utils = { path = "common/account_utils" } beacon_chain = { path = "beacon_node/beacon_chain" } beacon_node = { path = "beacon_node" } +beacon_node_fallback = { path = "validator_client/beacon_node_fallback" } beacon_processor = { path = "beacon_node/beacon_processor" } bls = { path = "crypto/bls" } clap_utils = { path = "common/clap_utils" } compare_fields = { path = "common/compare_fields" } deposit_contract = { path = "common/deposit_contract" } directory = { path = "common/directory" } +doppelganger_service = { path = "validator_client/doppelganger_service" } +validator_services = { path = "validator_client/validator_services" } environment = { path = "lighthouse/environment" } eth1 = { path = "beacon_node/eth1" } eth1_test_rig = { path = "testing/eth1_test_rig" } @@ -211,9 +248,10 @@ fork_choice = { path = "consensus/fork_choice" } genesis = { path = "beacon_node/genesis" } gossipsub = { path = "beacon_node/lighthouse_network/gossipsub/" } http_api = { path = "beacon_node/http_api" } +initialized_validators = { path = "validator_client/initialized_validators" } int_to_bytes = { path = "consensus/int_to_bytes" } kzg = { path = "crypto/kzg" } -lighthouse_metrics = { path = "common/lighthouse_metrics" } +metrics = { path = "common/metrics" } lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_version = { path = "common/lighthouse_version" } lockfile = { path = "common/lockfile" } @@ -228,18 +266,26 @@ pretty_reqwest_error = { path = "common/pretty_reqwest_error" } proto_array = { path = "consensus/proto_array" } safe_arith = { path = "consensus/safe_arith" } sensitive_url = { path = "common/sensitive_url" } +signing_method = { path = "validator_client/signing_method" } slasher = { path = "slasher", default-features = false } slashing_protection = { path = "validator_client/slashing_protection" } slot_clock = { path = "common/slot_clock" } state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" 
} swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } +system_health = { path = "common/system_health" } task_executor = { path = "common/task_executor" } types = { path = "consensus/types" } unused_port = { path = "common/unused_port" } validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } +validator_http_api = { path = "validator_client/http_api" } +validator_http_metrics = { path = "validator_client/http_metrics" } +validator_metrics = { path = "validator_client/validator_metrics" } +validator_store = { path = "validator_client/validator_store" } warp_utils = { path = "common/warp_utils" } +xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "50d63cdf1878e5cf3538e9aae5eed34a22c64e4a" } +zstd = "0.13" [profile.maxperf] inherits = "release" diff --git a/FUNDING.json b/FUNDING.json index 5001999927..3164f351be 100644 --- a/FUNDING.json +++ b/FUNDING.json @@ -2,6 +2,13 @@ "drips": { "ethereum": { "ownedBy": "0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b" + }, + "filecoin": { + "ownedBy": "0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b" } + }, + "opRetro": { + "projectId": "0x04b1cd5a7c59117474ce414b309fa48e985bdaab4b0dab72045f74d04ebd8cff" } -} \ No newline at end of file +} + diff --git a/Makefile b/Makefile index 32665d43ae..8faf8a2e54 100644 --- a/Makefile +++ b/Makefile @@ -183,7 +183,7 @@ test-exec-engine: # test vectors. test: test-release -# Updates the CLI help text pages in the Lighthouse book, building with Docker. +# Updates the CLI help text pages in the Lighthouse book, building with Docker (primarily for Windows users). 
cli: docker run --rm --user=root \ -v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \ @@ -220,6 +220,10 @@ lint: lint-fix: EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint +# Also run the lints on the optimized-only tests +lint-full: + RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" $(MAKE) lint + # Runs the makefile in the `ef_tests` repo. # # May download and extract an archive of test vectors from the ethereum @@ -240,7 +244,7 @@ install-audit: cargo install --force cargo-audit audit-CI: - cargo audit + cargo audit --ignore RUSTSEC-2024-0421 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: diff --git a/README.md b/README.md index 4b22087bcd..147a06e504 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Lighthouse is: - Built in [Rust](https://www.rust-lang.org), a modern language providing unique safety guarantees and excellent performance (comparable to C++). - Funded by various organisations, including Sigma Prime, the - Ethereum Foundation, ConsenSys, the Decentralization Foundation and private individuals. + Ethereum Foundation, Consensys, the Decentralization Foundation and private individuals. - Actively involved in the specification and security analysis of the Ethereum proof-of-stake consensus specification. 
diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 7f2fa05a88..a7752d621f 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -8,25 +8,26 @@ authors = [ edition = { workspace = true } [dependencies] +account_utils = { workspace = true } bls = { workspace = true } clap = { workspace = true } -types = { workspace = true } -environment = { workspace = true } -eth2_network_config = { workspace = true } clap_utils = { workspace = true } directory = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +eth2_keystore = { workspace = true } +eth2_network_config = { workspace = true } eth2_wallet = { workspace = true } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } -validator_dir = { workspace = true } -tokio = { workspace = true } -eth2_keystore = { workspace = true } -account_utils = { workspace = true } -slashing_protection = { workspace = true } -eth2 = { workspace = true } -safe_arith = { workspace = true } -slot_clock = { workspace = true } filesystem = { workspace = true } +safe_arith = { workspace = true } sensitive_url = { workspace = true } +slashing_protection = { workspace = true } +slot_clock = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +validator_dir = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index ec5af1e2ec..73e0ad54d4 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -294,7 +294,7 @@ pub fn read_wallet_password_from_cli( eprintln!(); eprintln!("{}", WALLET_PASSWORD_PROMPT); let password = - PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec()); + PlainText::from(read_password_from_user(stdin_inputs)?.as_bytes().to_vec()); Ok(password) } } diff --git a/account_manager/src/validator/exit.rs 
b/account_manager/src/validator/exit.rs index 3fb0e50d22..ea1a24da1f 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -409,6 +409,6 @@ mod tests { ) .unwrap(); - assert_eq!(expected_pk, kp.pk.into()); + assert_eq!(expected_pk, kp.pk); } } diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 19ab5ad60a..4d2353b553 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -7,7 +7,7 @@ use account_utils::{ recursively_find_voting_keystores, PasswordStorage, ValidatorDefinition, ValidatorDefinitions, CONFIG_FILENAME, }, - ZeroizeString, STDIN_INPUTS_FLAG, + STDIN_INPUTS_FLAG, }; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; @@ -16,6 +16,7 @@ use std::fs; use std::path::PathBuf; use std::thread::sleep; use std::time::Duration; +use zeroize::Zeroizing; pub const CMD: &str = "import"; pub const KEYSTORE_FLAG: &str = "keystore"; @@ -148,7 +149,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin // Skip keystores that already exist, but exit early if any operation fails. // Reuses the same password for all keystores if the `REUSE_PASSWORD_FLAG` flag is set. let mut num_imported_keystores = 0; - let mut previous_password: Option = None; + let mut previous_password: Option> = None; for src_keystore in &keystore_paths { let keystore = Keystore::from_json_file(src_keystore) @@ -182,14 +183,17 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let password = match keystore_password_path.as_ref() { Some(path) => { - let password_from_file: ZeroizeString = fs::read_to_string(path) + let password_from_file: Zeroizing = fs::read_to_string(path) .map_err(|e| format!("Unable to read {:?}: {:?}", path, e))? 
.into(); - password_from_file.without_newlines() + password_from_file + .trim_end_matches(['\r', '\n']) + .to_string() + .into() } None => { let password_from_user = read_password_from_user(stdin_inputs)?; - if password_from_user.as_ref().is_empty() { + if password_from_user.is_empty() { eprintln!("Continuing without password."); sleep(Duration::from_secs(1)); // Provides nicer UX. break None; @@ -314,7 +318,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin /// Otherwise, returns the keystore error. fn check_password_on_keystore( keystore: &Keystore, - password: &ZeroizeString, + password: &Zeroizing, ) -> Result { match keystore.decrypt_keypair(password.as_ref()) { Ok(_) => { diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index b22007050f..6369646929 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -226,14 +226,14 @@ pub fn read_new_wallet_password_from_cli( eprintln!(); eprintln!("{}", NEW_WALLET_PASSWORD_PROMPT); let password = - PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec()); + PlainText::from(read_password_from_user(stdin_inputs)?.as_bytes().to_vec()); // Ensure the password meets the minimum requirements. 
match is_password_sufficiently_complex(password.as_bytes()) { Ok(_) => { eprintln!("{}", RETYPE_PASSWORD_PROMPT); let retyped_password = - PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec()); + PlainText::from(read_password_from_user(stdin_inputs)?.as_bytes().to_vec()); if retyped_password == password { break Ok(password); } else { diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index bb946e3c5a..7da65ad742 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "5.3.0" +version = "6.0.1" authors = [ "Paul Hauner ", "Age Manning (blob_count, &spec); + let (signed_block, blobs) = create_test_block_and_blobs::(blob_count, &spec); - let column_sidecars = - blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, &kzg.clone(), &spec) - .unwrap(); + let column_sidecars = blobs_to_data_column_sidecars( + &blobs.iter().collect::>(), + &signed_block, + &kzg, + &spec, + ) + .unwrap(); let spec = spec.clone(); diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 9ee0b01df3..c3dea3dbb4 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -306,7 +306,7 @@ pub struct VerifiedAggregatedAttestation<'a, T: BeaconChainTypes> { indexed_attestation: IndexedAttestation, } -impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { +impl VerifiedAggregatedAttestation<'_, T> { pub fn into_indexed_attestation(self) -> IndexedAttestation { self.indexed_attestation } @@ -319,7 +319,7 @@ pub struct VerifiedUnaggregatedAttestation<'a, T: BeaconChainTypes> { subnet_id: SubnetId, } -impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { +impl VerifiedUnaggregatedAttestation<'_, T> { pub fn into_indexed_attestation(self) -> IndexedAttestation { self.indexed_attestation } @@ -327,7 +327,7 @@ impl<'a, T: 
BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { /// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive /// macro. -impl<'a, T: BeaconChainTypes> Clone for IndexedUnaggregatedAttestation<'a, T> { +impl Clone for IndexedUnaggregatedAttestation<'_, T> { fn clone(&self) -> Self { Self { attestation: self.attestation, @@ -353,7 +353,7 @@ pub trait VerifiedAttestation: Sized { } } -impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregatedAttestation<'a, T> { +impl VerifiedAttestation for VerifiedAggregatedAttestation<'_, T> { fn attestation(&self) -> AttestationRef { self.attestation() } @@ -363,7 +363,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregatedAttes } } -impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedUnaggregatedAttestation<'a, T> { +impl VerifiedAttestation for VerifiedUnaggregatedAttestation<'_, T> { fn attestation(&self) -> AttestationRef { self.attestation } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d41bf7bd21..7155547d01 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -34,7 +34,6 @@ use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, Prep use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker}; -use crate::historical_blocks::HistoricalBlockError; use crate::light_client_finality_update_verification::{ Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate, }; @@ -89,7 +88,7 @@ use kzg::Kzg; use operation_pool::{ CompactAttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella, }; -use parking_lot::{Mutex, RwLock}; +use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use 
proto_array::{DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; use slasher::Slasher; @@ -121,6 +120,7 @@ use store::{ DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; +use tokio::sync::mpsc::Receiver; use tokio_stream::Stream; use tree_hash::TreeHash; use types::blob_sidecar::FixedBlobSidecarList; @@ -755,12 +755,10 @@ impl BeaconChain { ) -> Result> + '_, Error> { let oldest_block_slot = self.store.get_oldest_block_slot(); if start_slot < oldest_block_slot { - return Err(Error::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot: start_slot, - oldest_block_slot, - }, - )); + return Err(Error::HistoricalBlockOutOfRange { + slot: start_slot, + oldest_block_slot, + }); } let local_head = self.head_snapshot(); @@ -769,7 +767,6 @@ impl BeaconChain { start_slot, local_head.beacon_state.clone(), local_head.beacon_block_root, - &self.spec, )?; Ok(iter.map(|result| result.map_err(Into::into))) @@ -785,21 +782,18 @@ impl BeaconChain { ) -> Result> + '_, Error> { let oldest_block_slot = self.store.get_oldest_block_slot(); if start_slot < oldest_block_slot { - return Err(Error::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot: start_slot, - oldest_block_slot, - }, - )); + return Err(Error::HistoricalBlockOutOfRange { + slot: start_slot, + oldest_block_slot, + }); } self.with_head(move |head| { - let iter = self.store.forwards_block_roots_iterator_until( - start_slot, - end_slot, - || Ok((head.beacon_state.clone(), head.beacon_block_root)), - &self.spec, - )?; + let iter = + self.store + .forwards_block_roots_iterator_until(start_slot, end_slot, || { + Ok((head.beacon_state.clone(), head.beacon_block_root)) + })?; Ok(iter .map(|result| result.map_err(Into::into)) .take_while(move |result| { @@ -869,7 +863,6 @@ impl BeaconChain { start_slot, local_head.beacon_state_root(), local_head.beacon_state.clone(), - &self.spec, )?; 
Ok(iter.map(|result| result.map_err(Into::into))) @@ -886,12 +879,11 @@ impl BeaconChain { end_slot: Slot, ) -> Result> + '_, Error> { self.with_head(move |head| { - let iter = self.store.forwards_state_roots_iterator_until( - start_slot, - end_slot, - || Ok((head.beacon_state.clone(), head.beacon_state_root())), - &self.spec, - )?; + let iter = + self.store + .forwards_state_roots_iterator_until(start_slot, end_slot, || { + Ok((head.beacon_state.clone(), head.beacon_state_root())) + })?; Ok(iter .map(|result| result.map_err(Into::into)) .take_while(move |result| { @@ -991,7 +983,7 @@ impl BeaconChain { WhenSlotSkipped::Prev => self.block_root_at_slot_skips_prev(request_slot), } .or_else(|e| match e { - Error::HistoricalBlockError(_) => Ok(None), + Error::HistoricalBlockOutOfRange { .. } => Ok(None), e => Err(e), }) } @@ -1120,6 +1112,7 @@ impl BeaconChain { /// ## Errors /// /// May return a database error. + #[allow(clippy::type_complexity)] pub fn get_blocks_checking_caches( self: &Arc, block_roots: Vec, @@ -1135,6 +1128,7 @@ impl BeaconChain { Ok(BeaconBlockStreamer::::new(self, CheckCaches::Yes)?.launch_stream(block_roots)) } + #[allow(clippy::type_complexity)] pub fn get_blocks( self: &Arc, block_roots: Vec, @@ -2976,7 +2970,6 @@ impl BeaconChain { pub async fn process_gossip_blob( self: &Arc, blob: GossipVerifiedBlob, - publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { let block_root = blob.block_root(); @@ -2995,17 +2988,9 @@ impl BeaconChain { return Err(BlockError::BlobNotRequired(blob.slot())); } - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_blob_sidecar_subscribers() { - event_handler.register(EventKind::BlobSidecar(SseBlobSidecar::from_blob_sidecar( - blob.as_blob(), - ))); - } - } + self.emit_sse_blob_sidecar_events(&block_root, std::iter::once(blob.as_blob())); - let r = self - .check_gossip_blob_availability_and_import(blob, publish_fn) - .await; + let r = 
self.check_gossip_blob_availability_and_import(blob).await; self.remove_notified(&block_root, r) } @@ -3083,20 +3068,63 @@ impl BeaconChain { } } + self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); + + let r = self + .check_rpc_blob_availability_and_import(slot, block_root, blobs) + .await; + self.remove_notified(&block_root, r) + } + + /// Process blobs retrieved from the EL and returns the `AvailabilityProcessingStatus`. + /// + /// `data_column_recv`: An optional receiver for `DataColumnSidecarList`. + /// If PeerDAS is enabled, this receiver will be provided and used to send + /// the `DataColumnSidecar`s once they have been successfully computed. + pub async fn process_engine_blobs( + self: &Arc, + slot: Slot, + block_root: Hash256, + blobs: FixedBlobSidecarList, + data_column_recv: Option>>, + ) -> Result { + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its blobs again. 
+ if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::DuplicateFullyImported(block_root)); + } + + self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); + + let r = self + .check_engine_blob_availability_and_import(slot, block_root, blobs, data_column_recv) + .await; + self.remove_notified(&block_root, r) + } + + fn emit_sse_blob_sidecar_events<'a, I>(self: &Arc, block_root: &Hash256, blobs_iter: I) + where + I: Iterator>, + { if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_blob_sidecar_subscribers() { - for blob in blobs.iter().filter_map(|maybe_blob| maybe_blob.as_ref()) { + let imported_blobs = self + .data_availability_checker + .cached_blob_indexes(block_root) + .unwrap_or_default(); + let new_blobs = blobs_iter.filter(|b| !imported_blobs.contains(&b.index)); + + for blob in new_blobs { event_handler.register(EventKind::BlobSidecar( SseBlobSidecar::from_blob_sidecar(blob), )); } } } - - let r = self - .check_rpc_blob_availability_and_import(slot, block_root, blobs) - .await; - self.remove_notified(&block_root, r) } /// Cache the columns in the processing cache, process it, then evict it from the cache if it was @@ -3186,7 +3214,7 @@ impl BeaconChain { }; let r = self - .process_availability(slot, availability, || Ok(())) + .process_availability(slot, availability, None, || Ok(())) .await; self.remove_notified(&block_root, r) .map(|availability_processing_status| { @@ -3314,7 +3342,7 @@ impl BeaconChain { match executed_block { ExecutedBlock::Available(block) => { - self.import_available_block(Box::new(block)).await + self.import_available_block(Box::new(block), None).await } ExecutedBlock::AvailabilityPending(block) => { self.check_block_availability_and_import(block).await @@ -3446,7 +3474,7 @@ impl BeaconChain { let availability = self .data_availability_checker .put_pending_executed_block(block)?; - self.process_availability(slot, 
availability, || Ok(())) + self.process_availability(slot, availability, None, || Ok(())) .await } @@ -3455,7 +3483,6 @@ impl BeaconChain { async fn check_gossip_blob_availability_and_import( self: &Arc, blob: GossipVerifiedBlob, - publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { let slot = blob.slot(); if let Some(slasher) = self.slasher.as_ref() { @@ -3463,7 +3490,7 @@ impl BeaconChain { } let availability = self.data_availability_checker.put_gossip_blob(blob)?; - self.process_availability(slot, availability, publish_fn) + self.process_availability(slot, availability, None, || Ok(())) .await } @@ -3482,16 +3509,41 @@ impl BeaconChain { } } - let availability = self.data_availability_checker.put_gossip_data_columns( - slot, - block_root, - data_columns, - )?; + let availability = self + .data_availability_checker + .put_gossip_data_columns(block_root, data_columns)?; - self.process_availability(slot, availability, publish_fn) + self.process_availability(slot, availability, None, publish_fn) .await } + fn check_blobs_for_slashability( + self: &Arc, + block_root: Hash256, + blobs: &FixedBlobSidecarList, + ) -> Result<(), BlockError> { + let mut slashable_cache = self.observed_slashable.write(); + for header in blobs + .iter() + .filter_map(|b| b.as_ref().map(|b| b.signed_block_header.clone())) + .unique() + { + if verify_header_signature::(self, &header).is_ok() { + slashable_cache + .observe_slashable( + header.message.slot, + header.message.proposer_index, + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(header); + } + } + } + Ok(()) + } + /// Checks if the provided blobs can make any cached blocks available, and imports immediately /// if so, otherwise caches the blob in the data availability checker. 
async fn check_rpc_blob_availability_and_import( @@ -3500,35 +3552,28 @@ impl BeaconChain { block_root: Hash256, blobs: FixedBlobSidecarList, ) -> Result { - // Need to scope this to ensure the lock is dropped before calling `process_availability` - // Even an explicit drop is not enough to convince the borrow checker. - { - let mut slashable_cache = self.observed_slashable.write(); - for header in blobs - .iter() - .filter_map(|b| b.as_ref().map(|b| b.signed_block_header.clone())) - .unique() - { - if verify_header_signature::(self, &header).is_ok() { - slashable_cache - .observe_slashable( - header.message.slot, - header.message.proposer_index, - block_root, - ) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; - if let Some(slasher) = self.slasher.as_ref() { - slasher.accept_block_header(header); - } - } - } - } - let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + self.check_blobs_for_slashability(block_root, &blobs)?; let availability = self .data_availability_checker - .put_rpc_blobs(block_root, epoch, blobs)?; + .put_rpc_blobs(block_root, blobs)?; - self.process_availability(slot, availability, || Ok(())) + self.process_availability(slot, availability, None, || Ok(())) + .await + } + + async fn check_engine_blob_availability_and_import( + self: &Arc, + slot: Slot, + block_root: Hash256, + blobs: FixedBlobSidecarList, + data_column_recv: Option>>, + ) -> Result { + self.check_blobs_for_slashability(block_root, &blobs)?; + let availability = self + .data_availability_checker + .put_engine_blobs(block_root, blobs)?; + + self.process_availability(slot, availability, data_column_recv, || Ok(())) .await } @@ -3564,13 +3609,11 @@ impl BeaconChain { // This slot value is purely informative for the consumers of // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. 
- let availability = self.data_availability_checker.put_rpc_custody_columns( - block_root, - slot.epoch(T::EthSpec::slots_per_epoch()), - custody_columns, - )?; + let availability = self + .data_availability_checker + .put_rpc_custody_columns(block_root, custody_columns)?; - self.process_availability(slot, availability, || Ok(())) + self.process_availability(slot, availability, None, || Ok(())) .await } @@ -3582,13 +3625,14 @@ impl BeaconChain { self: &Arc, slot: Slot, availability: Availability, + recv: Option>>, publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { match availability { Availability::Available(block) => { publish_fn()?; // Block is fully available, import into fork choice - self.import_available_block(block).await + self.import_available_block(block, recv).await } Availability::MissingComponents(block_root) => Ok( AvailabilityProcessingStatus::MissingComponents(slot, block_root), @@ -3599,6 +3643,7 @@ impl BeaconChain { pub async fn import_available_block( self: &Arc, block: Box>, + data_column_recv: Option>>, ) -> Result { let AvailableExecutedBlock { block, @@ -3640,6 +3685,7 @@ impl BeaconChain { parent_block, parent_eth1_finalization_data, consensus_context, + data_column_recv, ) }, "payload_verification_handle", @@ -3678,6 +3724,7 @@ impl BeaconChain { parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, mut consensus_context: ConsensusContext, + data_column_recv: Option>>, ) -> Result { // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- // Everything in this initial section is on the hot path between processing the block and @@ -3823,7 +3870,6 @@ impl BeaconChain { // state if we returned early without committing. In other words, an error here would // corrupt the node's database permanently. 
// ----------------------------------------------------------------------------------------- - self.import_block_update_shuffling_cache(block_root, &mut state); self.import_block_observe_attestations( block, @@ -3840,15 +3886,53 @@ impl BeaconChain { ); self.import_block_update_slasher(block, &state, &mut consensus_context); - let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); - // Store the block and its state, and execute the confirmation batch for the intermediate // states, which will delete their temporary flags. // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 let (_, signed_block, blobs, data_columns) = signed_block.deconstruct(); + // TODO(das) we currently store all subnet sampled columns. Tracking issue to exclude non + // custody columns: https://github.com/sigp/lighthouse/issues/6465 + let custody_columns_count = self.data_availability_checker.get_sampling_column_count(); + // if block is made available via blobs, dropped the data columns. + let data_columns = data_columns.filter(|columns| columns.len() == custody_columns_count); + + let data_columns = match (data_columns, data_column_recv) { + // If the block was made available via custody columns received from gossip / rpc, use them + // since we already have them. + (Some(columns), _) => Some(columns), + // Otherwise, it means blobs were likely available via fetching from EL, in this case we + // wait for the data columns to be computed (blocking). + (None, Some(mut data_column_recv)) => { + let _column_recv_timer = + metrics::start_timer(&metrics::BLOCK_PROCESSING_DATA_COLUMNS_WAIT); + // Unable to receive data columns from sender, sender is either dropped or + // failed to compute data columns from blobs. We restore fork choice here and + // return to avoid inconsistency in database. 
+ if let Some(columns) = data_column_recv.blocking_recv() { + Some(columns) + } else { + let err_msg = "Did not receive data columns from sender"; + error!( + self.log, + "Failed to store data columns into the database"; + "msg" => "Restoring fork choice from disk", + "error" => err_msg, + ); + return Err(self + .handle_import_block_db_write_error(fork_choice) + .err() + .unwrap_or(BlockError::InternalError(err_msg.to_string()))); + } + } + // No data columns present and compute data columns task was not spawned. + // Could either be no blobs in the block or before PeerDAS activation. + (None, None) => None, + }; + let block = signed_block.message(); + let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); ops.extend( confirmed_state_roots .into_iter() @@ -3890,33 +3974,10 @@ impl BeaconChain { "msg" => "Restoring fork choice from disk", "error" => ?e, ); - - // Clear the early attester cache to prevent attestations which we would later be unable - // to verify due to the failure. - self.early_attester_cache.clear(); - - // Since the write failed, try to revert the canonical head back to what was stored - // in the database. This attempts to prevent inconsistency between the database and - // fork choice. 
- if let Err(e) = self.canonical_head.restore_from_store( - fork_choice, - ResetPayloadStatuses::always_reset_conditionally( - self.config.always_reset_payload_statuses, - ), - &self.store, - &self.spec, - &self.log, - ) { - crit!( - self.log, - "No stored fork choice found to restore from"; - "error" => ?e, - "warning" => "The database is likely corrupt now, consider --purge-db" - ); - return Err(BlockError::BeaconChainError(e)); - } - - return Err(e.into()); + return Err(self + .handle_import_block_db_write_error(fork_choice) + .err() + .unwrap_or(e.into())); } drop(txn_lock); @@ -3984,6 +4045,41 @@ impl BeaconChain { Ok(block_root) } + fn handle_import_block_db_write_error( + &self, + // We don't actually need this value, however it's always present when we call this function + // and it needs to be dropped to prevent a dead-lock. Requiring it to be passed here is + // defensive programming. + fork_choice_write_lock: RwLockWriteGuard>, + ) -> Result<(), BlockError> { + // Clear the early attester cache to prevent attestations which we would later be unable + // to verify due to the failure. + self.early_attester_cache.clear(); + + // Since the write failed, try to revert the canonical head back to what was stored + // in the database. This attempts to prevent inconsistency between the database and + // fork choice. + if let Err(e) = self.canonical_head.restore_from_store( + fork_choice_write_lock, + ResetPayloadStatuses::always_reset_conditionally( + self.config.always_reset_payload_statuses, + ), + &self.store, + &self.spec, + &self.log, + ) { + crit!( + self.log, + "No stored fork choice found to restore from"; + "error" => ?e, + "warning" => "The database is likely corrupt now, consider --purge-db" + ); + Err(BlockError::BeaconChainError(e)) + } else { + Ok(()) + } + } + /// Check block's consistentency with any configured weak subjectivity checkpoint. 
fn check_block_against_weak_subjectivity_checkpoint( &self, diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 935f68728f..786b627bb7 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -1,5 +1,6 @@ use derivative::Derivative; use slot_clock::SlotClock; +use std::marker::PhantomData; use std::sync::Arc; use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; @@ -8,6 +9,7 @@ use crate::block_verification::{ BlockSlashInfo, }; use crate::kzg_utils::{validate_blob, validate_blobs}; +use crate::observed_data_sidecars::{DoNotObserve, ObservationStrategy, Observe}; use crate::{metrics, BeaconChainError}; use kzg::{Error as KzgError, Kzg, KzgCommitment}; use slog::debug; @@ -16,8 +18,7 @@ use std::time::Duration; use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; use types::{ - BeaconStateError, BlobSidecar, Epoch, EthSpec, Hash256, RuntimeVariableList, - SignedBeaconBlockHeader, Slot, + BeaconStateError, BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, }; /// An error occurred while validating a gossip blob. @@ -156,17 +157,16 @@ impl From for GossipBlobError { } } -pub type GossipVerifiedBlobList = RuntimeVariableList>; - /// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on /// the p2p network. #[derive(Debug)] -pub struct GossipVerifiedBlob { +pub struct GossipVerifiedBlob { block_root: Hash256, blob: KzgVerifiedBlob, + _phantom: PhantomData, } -impl GossipVerifiedBlob { +impl GossipVerifiedBlob { pub fn new( blob: Arc>, subnet_id: u64, @@ -175,7 +175,7 @@ impl GossipVerifiedBlob { let header = blob.signed_block_header.clone(); // We only process slashing info if the gossip verification failed // since we do not process the blob any further in that case. 
- validate_blob_sidecar_for_gossip(blob, subnet_id, chain).map_err(|e| { + validate_blob_sidecar_for_gossip::(blob, subnet_id, chain).map_err(|e| { process_block_slash_info::<_, GossipBlobError>( chain, BlockSlashInfo::from_early_error_blob(header, e), @@ -192,6 +192,7 @@ impl GossipVerifiedBlob { blob, seen_timestamp: Duration::from_secs(0), }, + _phantom: PhantomData, } } pub fn id(&self) -> BlobIdentifier { @@ -332,6 +333,25 @@ impl KzgVerifiedBlobList { verified_blobs: blobs, }) } + + /// Create a `KzgVerifiedBlobList` from `blobs` that are already KZG verified. + /// + /// This should be used with caution, as used incorrectly it could result in KZG verification + /// being skipped and invalid blobs being deemed valid. + pub fn from_verified>>>( + blobs: I, + seen_timestamp: Duration, + ) -> Self { + Self { + verified_blobs: blobs + .into_iter() + .map(|blob| KzgVerifiedBlob { + blob, + seen_timestamp, + }) + .collect(), + } + } } impl IntoIterator for KzgVerifiedBlobList { @@ -361,11 +381,11 @@ where validate_blobs::(kzg, commitments.as_slice(), blobs, proofs.as_slice()) } -pub fn validate_blob_sidecar_for_gossip( +pub fn validate_blob_sidecar_for_gossip( blob_sidecar: Arc>, subnet: u64, chain: &BeaconChain, -) -> Result, GossipBlobError> { +) -> Result, GossipBlobError> { let blob_slot = blob_sidecar.slot(); let blob_index = blob_sidecar.index; let block_parent_root = blob_sidecar.block_parent_root(); @@ -565,16 +585,45 @@ pub fn validate_blob_sidecar_for_gossip( ) .map_err(|e| GossipBlobError::BeaconChainError(e.into()))?; + if O::observe() { + observe_gossip_blob(&kzg_verified_blob.blob, chain)?; + } + + Ok(GossipVerifiedBlob { + block_root, + blob: kzg_verified_blob, + _phantom: PhantomData, + }) +} + +impl GossipVerifiedBlob { + pub fn observe( + self, + chain: &BeaconChain, + ) -> Result, GossipBlobError> { + observe_gossip_blob(&self.blob.blob, chain)?; + Ok(GossipVerifiedBlob { + block_root: self.block_root, + blob: self.blob, + _phantom: PhantomData, 
+ }) + } +} + +fn observe_gossip_blob( + blob_sidecar: &BlobSidecar, + chain: &BeaconChain, +) -> Result<(), GossipBlobError> { // Now the signature is valid, store the proposal so we don't accept another blob sidecar - // with the same `BlobIdentifier`. - // It's important to double-check that the proposer still hasn't been observed so we don't - // have a race-condition when verifying two blocks simultaneously. + // with the same `BlobIdentifier`. It's important to double-check that the proposer still + // hasn't been observed so we don't have a race-condition when verifying two blocks + // simultaneously. // - // Note: If this BlobSidecar goes on to fail full verification, we do not evict it from the seen_cache - // as alternate blob_sidecars for the same identifier can still be retrieved - // over rpc. Evicting them from this cache would allow faster propagation over gossip. So we allow - // retrieval of potentially valid blocks over rpc, but try to punish the proposer for signing - // invalid messages. Issue for more background + // Note: If this BlobSidecar goes on to fail full verification, we do not evict it from the + // seen_cache as alternate blob_sidecars for the same identifier can still be retrieved over + // rpc. Evicting them from this cache would allow faster propagation over gossip. So we + // allow retrieval of potentially valid blocks over rpc, but try to punish the proposer for + // signing invalid messages. Issue for more background // https://github.com/ethereum/consensus-specs/issues/3261 if chain .observed_blob_sidecars @@ -583,16 +632,12 @@ pub fn validate_blob_sidecar_for_gossip( .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? 
{ return Err(GossipBlobError::RepeatBlob { - proposer: proposer_index as u64, - slot: blob_slot, - index: blob_index, + proposer: blob_sidecar.block_proposer_index(), + slot: blob_sidecar.slot(), + index: blob_sidecar.index, }); } - - Ok(GossipVerifiedBlob { - block_root, - blob: kzg_verified_blob, - }) + Ok(()) } /// Returns the canonical root of the given `blob`. diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 661b539fbe..ddb7bb614a 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -70,7 +70,7 @@ use derivative::Derivative; use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; -use lighthouse_metrics::TryExt; +use metrics::TryExt; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; @@ -92,6 +92,7 @@ use std::fs; use std::io::Write; use std::sync::Arc; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use strum::AsRefStr; use task_executor::JoinHandle; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, @@ -137,7 +138,7 @@ const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files"); /// /// - The block is malformed/invalid (indicated by all results other than `BeaconChainError`. /// - We encountered an error whilst trying to verify the block (a `BeaconChainError`). -#[derive(Debug)] +#[derive(Debug, AsRefStr)] pub enum BlockError { /// The parent block was unknown. /// @@ -683,7 +684,7 @@ pub struct SignatureVerifiedBlock { consensus_context: ConsensusContext, } -/// Used to await the result of executing payload with a remote EE. +/// Used to await the result of executing payload with an EE. 
type PayloadVerificationHandle = JoinHandle>>; /// A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and @@ -750,7 +751,8 @@ pub fn build_blob_data_column_sidecars( &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, &[&blobs.len().to_string()], ); - let sidecars = blobs_to_data_column_sidecars(&blobs, block, &chain.kzg, &chain.spec) + let blob_refs = blobs.iter().collect::>(); + let sidecars = blobs_to_data_column_sidecars(&blob_refs, block, &chain.kzg, &chain.spec) .discard_timer_on_break(&mut timer)?; drop(timer); Ok(sidecars) @@ -838,9 +840,6 @@ impl GossipVerifiedBlock { let block_root = get_block_header_root(block_header); - // Disallow blocks that conflict with the anchor (weak subjectivity checkpoint), if any. - check_block_against_anchor_slot(block.message(), chain)?; - // Do not gossip a block from a finalized slot. check_block_against_finalized_slot(block.message(), block_root, chain)?; @@ -1073,9 +1072,6 @@ impl SignatureVerifiedBlock { .fork_name(&chain.spec) .map_err(BlockError::InconsistentFork)?; - // Check the anchor slot before loading the parent, to avoid spurious lookups. - check_block_against_anchor_slot(block.message(), chain)?; - let (mut parent, block) = load_parent(block, chain)?; let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( @@ -1343,7 +1339,6 @@ impl ExecutionPendingBlock { /* * Perform cursory checks to see if the block is even worth processing. */ - check_block_relevancy(block.as_block(), block_root, chain)?; // Define a future that will verify the execution payload with an execution engine. @@ -1688,19 +1683,6 @@ impl ExecutionPendingBlock { } } -/// Returns `Ok(())` if the block's slot is greater than the anchor block's slot (if any). 
-fn check_block_against_anchor_slot( - block: BeaconBlockRef<'_, T::EthSpec>, - chain: &BeaconChain, -) -> Result<(), BlockError> { - if let Some(anchor_slot) = chain.store.get_anchor_slot() { - if block.slot() <= anchor_slot { - return Err(BlockError::WeakSubjectivityConflict); - } - } - Ok(()) -} - /// Returns `Ok(())` if the block is later than the finalized slot on `chain`. /// /// Returns an error if the block is earlier or equal to the finalized slot, or there was an error @@ -2091,6 +2073,7 @@ pub fn get_validator_pubkey_cache( /// /// The signature verifier is empty because it does not yet have any of this block's signatures /// added to it. Use `Self::apply_to_signature_verifier` to apply the signatures. +#[allow(clippy::type_complexity)] fn get_signature_verifier<'a, T: BeaconChainTypes>( state: &'a BeaconState, validator_pubkey_cache: &'a ValidatorPubkeyCache, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 5f1e94fc8c..9d99ff9d8e 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -363,6 +363,10 @@ where store .put_block(&beacon_block_root, beacon_block.clone()) .map_err(|e| format!("Failed to store genesis block: {:?}", e))?; + store + .store_frozen_block_root_at_skip_slots(Slot::new(0), Slot::new(1), beacon_block_root) + .and_then(|ops| store.cold_db.do_atomically(ops)) + .map_err(|e| format!("Failed to store genesis block root: {e:?}"))?; // Store the genesis block under the `ZERO_HASH` key. store @@ -1033,7 +1037,9 @@ where ); // Check for states to reconstruct (in the background). 
- if beacon_chain.config.reconstruct_historic_states { + if beacon_chain.config.reconstruct_historic_states + && beacon_chain.store.get_oldest_block_slot() == 0 + { beacon_chain.store_migrator.process_reconstruction(); } diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 20edfbf31a..b8a607c886 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -88,6 +88,12 @@ pub struct ChainConfig { pub malicious_withhold_count: usize, /// Enable peer sampling on blocks. pub enable_sampling: bool, + /// Number of batches that the node splits blobs or data columns into during publication. + /// This doesn't apply if the node is the block proposer. For PeerDAS only. + pub blob_publication_batches: usize, + /// The delay in milliseconds applied by the node between sending each blob or data column batch. + /// This doesn't apply if the node is the block proposer. + pub blob_publication_batch_interval: Duration, } impl Default for ChainConfig { @@ -121,6 +127,8 @@ impl Default for ChainConfig { enable_light_client_server: false, malicious_withhold_count: 0, enable_sampling: false, + blob_publication_batches: 4, + blob_publication_batch_interval: Duration::from_millis(300), } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index d43decfa88..83de995ddc 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -18,7 +18,7 @@ use task_executor::TaskExecutor; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ BlobSidecarList, ChainSpec, DataColumnIdentifier, DataColumnSidecar, DataColumnSidecarList, - Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, Slot, + Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, }; mod error; @@ -146,6 +146,10 
@@ impl DataAvailabilityChecker { self.availability_cache.sampling_column_count() } + pub(crate) fn is_supernode(&self) -> bool { + self.get_sampling_column_count() == self.spec.number_of_columns + } + /// Checks if the block root is currenlty in the availability cache awaiting import because /// of missing components. pub fn get_execution_valid_block( @@ -201,7 +205,6 @@ impl DataAvailabilityChecker { pub fn put_rpc_blobs( &self, block_root: Hash256, - epoch: Epoch, blobs: FixedBlobSidecarList, ) -> Result, AvailabilityCheckError> { let seen_timestamp = self @@ -220,7 +223,7 @@ impl DataAvailabilityChecker { .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache - .put_kzg_verified_blobs(block_root, epoch, verified_blobs, &self.log) + .put_kzg_verified_blobs(block_root, verified_blobs, &self.log) } /// Put a list of custody columns received via RPC into the availability cache. This performs KZG @@ -229,7 +232,6 @@ impl DataAvailabilityChecker { pub fn put_rpc_custody_columns( &self, block_root: Hash256, - epoch: Epoch, custody_columns: DataColumnSidecarList, ) -> Result, AvailabilityCheckError> { // TODO(das): report which column is invalid for proper peer scoring @@ -248,12 +250,32 @@ impl DataAvailabilityChecker { self.availability_cache.put_kzg_verified_data_columns( block_root, - epoch, verified_custody_columns, &self.log, ) } + /// Put a list of blobs received from the EL pool into the availability cache. + /// + /// This DOES NOT perform KZG verification because the KZG proofs should have been constructed + /// immediately prior to calling this function so they are assumed to be valid. 
+ pub fn put_engine_blobs( + &self, + block_root: Hash256, + blobs: FixedBlobSidecarList, + ) -> Result, AvailabilityCheckError> { + let seen_timestamp = self + .slot_clock + .now_duration() + .ok_or(AvailabilityCheckError::SlotClockError)?; + + let verified_blobs = + KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp); + + self.availability_cache + .put_kzg_verified_blobs(block_root, verified_blobs, &self.log) + } + /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. @@ -265,7 +287,6 @@ impl DataAvailabilityChecker { ) -> Result, AvailabilityCheckError> { self.availability_cache.put_kzg_verified_blobs( gossip_blob.block_root(), - gossip_blob.epoch(), vec![gossip_blob.into_inner()], &self.log, ) @@ -279,12 +300,9 @@ impl DataAvailabilityChecker { #[allow(clippy::type_complexity)] pub fn put_gossip_data_columns( &self, - slot: Slot, block_root: Hash256, gossip_data_columns: Vec>, ) -> Result, AvailabilityCheckError> { - let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); - let custody_columns = gossip_data_columns .into_iter() .map(|c| KzgVerifiedCustodyDataColumn::from_asserted_custody(c.into_inner())) @@ -292,7 +310,6 @@ impl DataAvailabilityChecker { self.availability_cache.put_kzg_verified_data_columns( block_root, - epoch, custody_columns, &self.log, ) @@ -594,12 +611,7 @@ impl DataAvailabilityChecker { ); self.availability_cache - .put_kzg_verified_data_columns( - *block_root, - slot.epoch(T::EthSpec::slots_per_epoch()), - data_columns_to_publish.clone(), - &self.log, - ) + .put_kzg_verified_data_columns(*block_root, data_columns_to_publish.clone(), &self.log) .map(|availability| { DataColumnReconstructionResult::Success(( availability, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs 
b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index dbfa00e6e2..cfdb3cfe91 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -10,7 +10,6 @@ pub enum Error { blob_commitment: KzgCommitment, block_commitment: KzgCommitment, }, - UnableToDetermineImportRequirement, Unexpected, SszTypes(ssz_types::Error), MissingBlobs, @@ -44,7 +43,6 @@ impl Error { | Error::Unexpected | Error::ParentStateMissing(_) | Error::BlockReplayError(_) - | Error::UnableToDetermineImportRequirement | Error::RebuildingStateCaches(_) | Error::SlotClockError => ErrorCategory::Internal, Error::InvalidBlobs { .. } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 29fe2107c8..d5351a5dc2 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -34,11 +34,6 @@ pub struct PendingComponents { pub reconstruction_started: bool, } -pub enum BlockImportRequirement { - AllBlobs, - ColumnSampling(usize), -} - impl PendingComponents { /// Returns an immutable reference to the cached block. pub fn get_cached_block(&self) -> &Option> { @@ -192,63 +187,49 @@ impl PendingComponents { /// /// Returns `true` if both the block exists and the number of received blobs / custody columns /// matches the number of expected blobs / custody columns. 
- pub fn is_available( - &self, - block_import_requirement: &BlockImportRequirement, - log: &Logger, - ) -> bool { + pub fn is_available(&self, custody_column_count: usize, log: &Logger) -> bool { let block_kzg_commitments_count_opt = self.block_kzg_commitments_count(); + let expected_blobs_msg = block_kzg_commitments_count_opt + .as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); - match block_import_requirement { - BlockImportRequirement::AllBlobs => { - let received_blobs = self.num_received_blobs(); - let expected_blobs_msg = block_kzg_commitments_count_opt - .as_ref() - .map(|num| num.to_string()) - .unwrap_or("unknown".to_string()); - - debug!(log, - "Component(s) added to data availability checker"; - "block_root" => ?self.block_root, - "received_block" => block_kzg_commitments_count_opt.is_some(), - "received_blobs" => received_blobs, - "expected_blobs" => expected_blobs_msg, - ); - - block_kzg_commitments_count_opt.map_or(false, |num_expected_blobs| { - num_expected_blobs == received_blobs - }) + // No data columns when there are 0 blobs + let expected_columns_opt = block_kzg_commitments_count_opt.map(|blob_count| { + if blob_count > 0 { + custody_column_count + } else { + 0 } - BlockImportRequirement::ColumnSampling(num_expected_columns) => { - // No data columns when there are 0 blobs - let expected_columns_opt = block_kzg_commitments_count_opt.map(|blob_count| { - if blob_count > 0 { - *num_expected_columns - } else { - 0 - } - }); + }); + let expected_columns_msg = expected_columns_opt + .as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); - let expected_columns_msg = expected_columns_opt - .as_ref() - .map(|num| num.to_string()) - .unwrap_or("unknown".to_string()); + let num_received_blobs = self.num_received_blobs(); + let num_received_columns = self.num_received_data_columns(); - let num_received_columns = self.num_received_data_columns(); + debug!( + log, + "Component(s) added to data availability 
checker"; + "block_root" => ?self.block_root, + "received_blobs" => num_received_blobs, + "expected_blobs" => expected_blobs_msg, + "received_columns" => num_received_columns, + "expected_columns" => expected_columns_msg, + ); - debug!(log, - "Component(s) added to data availability checker"; - "block_root" => ?self.block_root, - "received_block" => block_kzg_commitments_count_opt.is_some(), - "received_columns" => num_received_columns, - "expected_columns" => expected_columns_msg, - ); + let all_blobs_received = block_kzg_commitments_count_opt + .map_or(false, |num_expected_blobs| { + num_expected_blobs == num_received_blobs + }); - expected_columns_opt.map_or(false, |num_expected_columns| { - num_expected_columns == num_received_columns - }) - } - } + let all_columns_received = expected_columns_opt.map_or(false, |num_expected_columns| { + num_expected_columns == num_received_columns + }); + + all_blobs_received || all_columns_received } /// Returns an empty `PendingComponents` object with the given block root. @@ -271,7 +252,6 @@ impl PendingComponents { /// reconstructed from disk. Ensure you are not holding any write locks while calling this. 
pub fn make_available( self, - block_import_requirement: BlockImportRequirement, spec: &Arc, recover: R, ) -> Result, AvailabilityCheckError> @@ -298,31 +278,29 @@ impl PendingComponents { return Err(AvailabilityCheckError::Unexpected); }; - let (blobs, data_columns) = match block_import_requirement { - BlockImportRequirement::AllBlobs => { - let num_blobs_expected = diet_executed_block.num_blobs_expected(); - let Some(verified_blobs) = verified_blobs - .into_iter() - .map(|b| b.map(|b| b.to_blob())) - .take(num_blobs_expected) - .collect::>>() - else { - return Err(AvailabilityCheckError::Unexpected); - }; - let max_len = - spec.max_blobs_per_block(diet_executed_block.as_block().epoch()) as usize; - ( - Some(RuntimeVariableList::new(verified_blobs, max_len)?), - None, - ) - } - BlockImportRequirement::ColumnSampling(_) => { - let verified_data_columns = verified_data_columns - .into_iter() - .map(|d| d.into_inner()) - .collect(); - (None, Some(verified_data_columns)) - } + let is_peer_das_enabled = spec.is_peer_das_enabled_for_epoch(diet_executed_block.epoch()); + let (blobs, data_columns) = if is_peer_das_enabled { + let data_columns = verified_data_columns + .into_iter() + .map(|d| d.into_inner()) + .collect::>(); + (None, Some(data_columns)) + } else { + let num_blobs_expected = diet_executed_block.num_blobs_expected(); + let Some(verified_blobs) = verified_blobs + .into_iter() + .map(|b| b.map(|b| b.to_blob())) + .take(num_blobs_expected) + .collect::>>() + .map(Into::into) + else { + return Err(AvailabilityCheckError::Unexpected); + }; + let max_len = spec.max_blobs_per_block(diet_executed_block.as_block().epoch()) as usize; + ( + Some(RuntimeVariableList::new(verified_blobs, max_len)?), + None, + ) }; let executed_block = recover(diet_executed_block)?; @@ -364,10 +342,7 @@ impl PendingComponents { } if let Some(kzg_verified_data_column) = self.verified_data_columns.first() { - let epoch = kzg_verified_data_column - .as_data_column() - .slot() - 
.epoch(E::slots_per_epoch()); + let epoch = kzg_verified_data_column.as_data_column().epoch(); return Some(epoch); } @@ -474,27 +449,22 @@ impl DataAvailabilityCheckerInner { f(self.critical.read().peek(block_root)) } - fn block_import_requirement( - &self, - epoch: Epoch, - ) -> Result { - let peer_das_enabled = self.spec.is_peer_das_enabled_for_epoch(epoch); - if peer_das_enabled { - Ok(BlockImportRequirement::ColumnSampling( - self.sampling_column_count, - )) - } else { - Ok(BlockImportRequirement::AllBlobs) - } - } - pub fn put_kzg_verified_blobs>>( &self, block_root: Hash256, - epoch: Epoch, kzg_verified_blobs: I, log: &Logger, ) -> Result, AvailabilityCheckError> { + let mut kzg_verified_blobs = kzg_verified_blobs.into_iter().peekable(); + + let Some(epoch) = kzg_verified_blobs + .peek() + .map(|verified_blob| verified_blob.as_blob().epoch()) + else { + // Verified blobs list should be non-empty. + return Err(AvailabilityCheckError::Unexpected); + }; + let mut fixed_blobs = RuntimeFixedList::new(vec![None; self.spec.max_blobs_per_block(epoch) as usize]); @@ -517,12 +487,11 @@ impl DataAvailabilityCheckerInner { // Merge in the blobs. 
pending_components.merge_blobs(fixed_blobs); - let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement, log) { + if pending_components.is_available(self.sampling_column_count, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components.make_available(block_import_requirement, &self.spec, |diet_block| { + pending_components.make_available(&self.spec, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { @@ -537,10 +506,18 @@ impl DataAvailabilityCheckerInner { >( &self, block_root: Hash256, - epoch: Epoch, kzg_verified_data_columns: I, log: &Logger, ) -> Result, AvailabilityCheckError> { + let mut kzg_verified_data_columns = kzg_verified_data_columns.into_iter().peekable(); + let Some(epoch) = kzg_verified_data_columns + .peek() + .map(|verified_blob| verified_blob.as_data_column().epoch()) + else { + // Verified data_columns list should be non-empty. + return Err(AvailabilityCheckError::Unexpected); + }; + let mut write_lock = self.critical.write(); // Grab existing entry or create a new entry. @@ -554,13 +531,11 @@ impl DataAvailabilityCheckerInner { // Merge in the data columns. 
pending_components.merge_data_columns(kzg_verified_data_columns)?; - let block_import_requirement = self.block_import_requirement(epoch)?; - - if pending_components.is_available(&block_import_requirement, log) { + if pending_components.is_available(self.sampling_column_count, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components.make_available(block_import_requirement, &self.spec, |diet_block| { + pending_components.make_available(&self.spec, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { @@ -628,8 +603,8 @@ impl DataAvailabilityCheckerInner { log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); + let epoch = executed_block.as_block().epoch(); let block_root = executed_block.import_data.block_root; - let epoch = executed_block.block.epoch(); // register the block to get the diet block let diet_executed_block = self @@ -648,12 +623,11 @@ impl DataAvailabilityCheckerInner { pending_components.merge_block(diet_executed_block); // Check if we have all components and entire set is consistent. 
- let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement, log) { + if pending_components.is_available(self.sampling_column_count, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components.make_available(block_import_requirement, &self.spec, |diet_block| { + pending_components.make_available(&self.spec, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { @@ -709,6 +683,7 @@ impl DataAvailabilityCheckerInner { #[cfg(test)] mod test { use super::*; + use crate::{ blob_verification::GossipVerifiedBlob, block_verification::PayloadVerificationOutcome, @@ -718,6 +693,7 @@ mod test { test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, }; use fork_choice::PayloadVerificationStatus; + use logging::test_logger; use slog::{info, Logger}; use state_processing::ConsensusContext; @@ -938,7 +914,6 @@ mod test { let (pending_block, blobs) = availability_pending_block(&harness).await; let root = pending_block.import_data.block_root; - let epoch = pending_block.block.epoch(); let blobs_expected = pending_block.num_blobs_expected(); assert_eq!( @@ -987,7 +962,7 @@ mod test { for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); if blob_index == blobs_expected - 1 { assert!(matches!(availability, Availability::Available(_))); @@ -1011,12 +986,11 @@ mod test { "should have expected number of blobs" ); let root = pending_block.import_data.block_root; - let epoch = pending_block.block.epoch(); let mut kzg_verified_blobs = vec![]; for gossip_blob in blobs { 
kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); assert_eq!( availability, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 03e3289118..5b9b7c7023 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -57,6 +57,11 @@ impl DietAvailabilityPendingExecutedBlock { .cloned() .unwrap_or_default() } + + /// Returns the epoch corresponding to `self.slot()`. + pub fn epoch(&self) -> Epoch { + self.block.slot().epoch(E::slots_per_epoch()) + } } /// This LRU cache holds BeaconStates used for block import. If the cache overflows, diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index a4e83b2751..6cfd26786a 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -3,6 +3,7 @@ use crate::block_verification::{ BlockSlashInfo, }; use crate::kzg_utils::{reconstruct_data_columns, validate_data_columns}; +use crate::observed_data_sidecars::{ObservationStrategy, Observe}; use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; use derivative::Derivative; use fork_choice::ProtoBlock; @@ -13,6 +14,7 @@ use slog::debug; use slot_clock::SlotClock; use ssz_derive::{Decode, Encode}; use std::iter; +use std::marker::PhantomData; use std::sync::Arc; use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier}; use types::{ @@ -160,17 +162,16 @@ impl From for GossipDataColumnError { } } -pub type GossipVerifiedDataColumnList = RuntimeVariableList>; - /// A wrapper around 
a `DataColumnSidecar` that indicates it has been approved for re-gossiping on /// the p2p network. #[derive(Debug)] -pub struct GossipVerifiedDataColumn { +pub struct GossipVerifiedDataColumn { block_root: Hash256, data_column: KzgVerifiedDataColumn, + _phantom: PhantomData, } -impl GossipVerifiedDataColumn { +impl GossipVerifiedDataColumn { pub fn new( column_sidecar: Arc>, subnet_id: u64, @@ -179,12 +180,14 @@ impl GossipVerifiedDataColumn { let header = column_sidecar.signed_block_header.clone(); // We only process slashing info if the gossip verification failed // since we do not process the data column any further in that case. - validate_data_column_sidecar_for_gossip(column_sidecar, subnet_id, chain).map_err(|e| { - process_block_slash_info::<_, GossipDataColumnError>( - chain, - BlockSlashInfo::from_early_error_data_column(header, e), - ) - }) + validate_data_column_sidecar_for_gossip::(column_sidecar, subnet_id, chain).map_err( + |e| { + process_block_slash_info::<_, GossipDataColumnError>( + chain, + BlockSlashInfo::from_early_error_data_column(header, e), + ) + }, + ) } pub fn id(&self) -> DataColumnIdentifier { @@ -375,11 +378,11 @@ where Ok(()) } -pub fn validate_data_column_sidecar_for_gossip( +pub fn validate_data_column_sidecar_for_gossip( data_column: Arc>, subnet: u64, chain: &BeaconChain, -) -> Result, GossipDataColumnError> { +) -> Result, GossipDataColumnError> { let column_slot = data_column.slot(); verify_data_column_sidecar(&data_column, &chain.spec)?; verify_index_matches_subnet(&data_column, subnet, &chain.spec)?; @@ -404,9 +407,14 @@ pub fn validate_data_column_sidecar_for_gossip( ) .map_err(|e| GossipDataColumnError::BeaconChainError(e.into()))?; + if O::observe() { + observe_gossip_data_column(&kzg_verified_data_column.data, chain)?; + } + Ok(GossipVerifiedDataColumn { block_root: data_column.block_root(), data_column: kzg_verified_data_column, + _phantom: PhantomData, }) } @@ -648,11 +656,42 @@ fn verify_sidecar_not_from_future_slot( 
Ok(()) } +pub fn observe_gossip_data_column( + data_column_sidecar: &DataColumnSidecar, + chain: &BeaconChain, +) -> Result<(), GossipDataColumnError> { + // Now the signature is valid, store the proposal so we don't accept another data column sidecar + // with the same `DataColumnIdentifier`. It's important to double-check that the proposer still + // hasn't been observed so we don't have a race-condition when verifying two blocks + // simultaneously. + // + // Note: If this DataColumnSidecar goes on to fail full verification, we do not evict it from the + // seen_cache as alternate data_column_sidecars for the same identifier can still be retrieved over + // rpc. Evicting them from this cache would allow faster propagation over gossip. So we + // allow retrieval of potentially valid blocks over rpc, but try to punish the proposer for + // signing invalid messages. Issue for more background + // https://github.com/ethereum/consensus-specs/issues/3261 + if chain + .observed_column_sidecars + .write() + .observe_sidecar(data_column_sidecar) + .map_err(|e| GossipDataColumnError::BeaconChainError(e.into()))? 
+ { + return Err(GossipDataColumnError::PriorKnown { + proposer: data_column_sidecar.block_proposer_index(), + slot: data_column_sidecar.slot(), + index: data_column_sidecar.index, + }); + } + Ok(()) +} + #[cfg(test)] mod test { use crate::data_column_verification::{ validate_data_column_sidecar_for_gossip, GossipDataColumnError, }; + use crate::observed_data_sidecars::Observe; use crate::test_utils::BeaconChainHarness; use types::{DataColumnSidecar, EthSpec, ForkName, MainnetEthSpec}; @@ -691,8 +730,11 @@ mod test { .unwrap(), }; - let result = - validate_data_column_sidecar_for_gossip(column_sidecar.into(), index, &harness.chain); + let result = validate_data_column_sidecar_for_gossip::<_, Observe>( + column_sidecar.into(), + index, + &harness.chain, + ); assert!(matches!( result.err(), Some(GossipDataColumnError::UnexpectedDataColumn) diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index a26d755316..2a8fd4cd01 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -4,7 +4,6 @@ use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; use crate::data_availability_checker::AvailabilityCheckError; use crate::eth1_chain::Error as Eth1ChainError; -use crate::historical_blocks::HistoricalBlockError; use crate::migrate::PruningError; use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; @@ -123,7 +122,11 @@ pub enum BeaconChainError { block_slot: Slot, state_slot: Slot, }, - HistoricalBlockError(HistoricalBlockError), + /// Block is not available (only returned when fetching historic blocks). 
+ HistoricalBlockOutOfRange { + slot: Slot, + oldest_block_slot: Slot, + }, InvalidStateForShuffling { state_epoch: Epoch, shuffling_epoch: Epoch, @@ -245,7 +248,6 @@ easy_from_to!(BlockSignatureVerifierError, BeaconChainError); easy_from_to!(PruningError, BeaconChainError); easy_from_to!(ArithError, BeaconChainError); easy_from_to!(ForkChoiceStoreError, BeaconChainError); -easy_from_to!(HistoricalBlockError, BeaconChainError); easy_from_to!(StateAdvanceError, BeaconChainError); easy_from_to!(BlockReplayError, BeaconChainError); easy_from_to!(InconsistentFork, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 276262085e..cb6e4c34f3 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -107,8 +107,7 @@ fn get_sync_status( // Determine how many voting periods are contained in distance between // now and genesis, rounding up. - let voting_periods_past = - (seconds_till_genesis + voting_period_duration - 1) / voting_period_duration; + let voting_periods_past = seconds_till_genesis.div_ceil(voting_period_duration); // Return the start time of the current voting period*. // diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index f2420eea0d..92d24c53c0 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -7,14 +7,13 @@ //! So, this module contains functions that one might expect to find in other crates, but they live //! here for good reason. 
-use crate::otb_verification_service::OptimisticTransitionBlock; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; use execution_layer::{ BlockProposalContents, BlockProposalContentsType, BuilderParams, NewPayloadRequest, - PayloadAttributes, PayloadStatus, + PayloadAttributes, PayloadParameters, PayloadStatus, }; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; @@ -284,9 +283,6 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( "block_hash" => ?execution_payload.parent_hash(), "msg" => "the terminal block/parent was unavailable" ); - // Store Optimistic Transition Block in Database for later Verification - OptimisticTransitionBlock::from_block(block) - .persist_in_store::(&chain.store)?; Ok(()) } else { Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()) @@ -375,8 +371,9 @@ pub fn get_execution_payload( let timestamp = compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; - let latest_execution_payload_header_block_hash = - state.latest_execution_payload_header()?.block_hash(); + let latest_execution_payload_header = state.latest_execution_payload_header()?; + let latest_execution_payload_header_block_hash = latest_execution_payload_header.block_hash(); + let latest_execution_payload_header_gas_limit = latest_execution_payload_header.gas_limit(); let withdrawals = match state { &BeaconState::Capella(_) | &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { Some(get_expected_withdrawals(state, spec)?.0.into()) @@ -406,6 +403,7 @@ pub fn get_execution_payload( random, proposer_index, latest_execution_payload_header_block_hash, + latest_execution_payload_header_gas_limit, builder_params, withdrawals, parent_beacon_block_root, @@ -443,6 +441,7 @@ pub async fn prepare_execution_payload( random: Hash256, 
proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, + latest_execution_payload_header_gas_limit: u64, builder_params: BuilderParams, withdrawals: Option>, parent_beacon_block_root: Option, @@ -526,13 +525,20 @@ where parent_beacon_block_root, ); + let target_gas_limit = execution_layer.get_proposer_gas_limit(proposer_index).await; + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit: latest_execution_payload_header_gas_limit, + proposer_gas_limit: target_gas_limit, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: fork, + }; + let block_contents = execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - fork, &chain.spec, builder_boost_factor, block_production_version, diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs new file mode 100644 index 0000000000..f1646072c9 --- /dev/null +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -0,0 +1,312 @@ +//! This module implements an optimisation to fetch blobs via JSON-RPC from the EL. +//! If a blob has already been seen in the public mempool, then it is often unnecessary to wait for +//! it to arrive on P2P gossip. This PR uses a new JSON-RPC method (`engine_getBlobsV1`) which +//! allows the CL to load the blobs quickly from the EL's blob pool. +//! +//! Once the node fetches the blobs from EL, it then publishes the remaining blobs that it hasn't seen +//! on P2P gossip to the network. From PeerDAS onwards, together with the increase in blob count, +//! broadcasting blobs requires a much higher bandwidth, and is only done by high capacity +//! supernodes. 
+use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use crate::kzg_utils::blobs_to_data_column_sidecars; +use crate::observed_data_sidecars::DoNotObserve; +use crate::{metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError}; +use execution_layer::json_structures::BlobAndProofV1; +use execution_layer::Error as ExecutionLayerError; +use metrics::{inc_counter, inc_counter_by, TryExt}; +use slog::{debug, error, o, Logger}; +use ssz_types::FixedVector; +use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; +use std::sync::Arc; +use tokio::sync::mpsc::Receiver; +use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; +use types::{ + BeaconStateError, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnSidecarList, EthSpec, + FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, +}; + +pub enum BlobsOrDataColumns { + Blobs(Vec>), + DataColumns(DataColumnSidecarList), +} + +#[derive(Debug)] +pub enum FetchEngineBlobError { + BeaconStateError(BeaconStateError), + BlobProcessingError(BlockError), + BlobSidecarError(BlobSidecarError), + ExecutionLayerMissing, + InternalError(String), + GossipBlob(GossipBlobError), + RequestFailed(ExecutionLayerError), + RuntimeShutdown, +} + +/// Fetches blobs from the EL mempool and processes them. It also broadcasts unseen blobs or +/// data columns (PeerDAS onwards) to the network, using the supplied `publish_fn`. 
+pub async fn fetch_and_process_engine_blobs( + chain: Arc>, + block_root: Hash256, + block: Arc>>, + publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, +) -> Result, FetchEngineBlobError> { + let block_root_str = format!("{:?}", block_root); + let log = chain + .log + .new(o!("service" => "fetch_engine_blobs", "block_root" => block_root_str)); + + let versioned_hashes = if let Some(kzg_commitments) = block + .message() + .body() + .blob_kzg_commitments() + .ok() + .filter(|blobs| !blobs.is_empty()) + { + kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect::>() + } else { + debug!( + log, + "Fetch blobs not triggered - none required"; + ); + return Ok(None); + }; + + let num_expected_blobs = versioned_hashes.len(); + + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; + + debug!( + log, + "Fetching blobs from the EL"; + "num_expected_blobs" => num_expected_blobs, + ); + let response = execution_layer + .get_blobs(versioned_hashes) + .await + .map_err(FetchEngineBlobError::RequestFailed)?; + + if response.is_empty() { + debug!( + log, + "No blobs fetched from the EL"; + "num_expected_blobs" => num_expected_blobs, + ); + inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); + return Ok(None); + } else { + inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); + } + + let (signed_block_header, kzg_commitments_proof) = block + .signed_block_header_and_kzg_commitments_proof() + .map_err(FetchEngineBlobError::BeaconStateError)?; + + let fixed_blob_sidecar_list = build_blob_sidecars( + &block, + response, + signed_block_header, + &kzg_commitments_proof, + &chain.spec, + )?; + + let num_fetched_blobs = fixed_blob_sidecar_list + .iter() + .filter(|b| b.is_some()) + .count(); + + inc_counter_by( + &metrics::BLOBS_FROM_EL_EXPECTED_TOTAL, + num_expected_blobs as u64, + ); + inc_counter_by( + &metrics::BLOBS_FROM_EL_RECEIVED_TOTAL, + num_fetched_blobs as u64, + ); + + // Gossip verify blobs 
before publishing. This prevents blobs with invalid KZG proofs from + // the EL making it into the data availability checker. We do not immediately add these + // blobs to the observed blobs/columns cache because we want to allow blobs/columns to arrive on gossip + // and be accepted (and propagated) while we are waiting to publish. Just before publishing + // we will observe the blobs/columns and only proceed with publishing if they are not yet seen. + let blobs_to_import_and_publish = fixed_blob_sidecar_list + .iter() + .filter_map(|opt_blob| { + let blob = opt_blob.as_ref()?; + match GossipVerifiedBlob::::new(blob.clone(), blob.index, &chain) { + Ok(verified) => Some(Ok(verified)), + // Ignore already seen blobs. + Err(GossipBlobError::RepeatBlob { .. }) => None, + Err(e) => Some(Err(e)), + } + }) + .collect::, _>>() + .map_err(FetchEngineBlobError::GossipBlob)?; + + let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); + + let data_columns_receiver_opt = if peer_das_enabled { + // Partial blobs response isn't useful for PeerDAS, so we don't bother building and publishing data columns. 
+ if num_fetched_blobs != num_expected_blobs { + debug!( + log, + "Not all blobs fetched from the EL"; + "info" => "Unable to compute data columns", + "num_fetched_blobs" => num_fetched_blobs, + "num_expected_blobs" => num_expected_blobs, + ); + return Ok(None); + } + + let data_columns_receiver = spawn_compute_and_publish_data_columns_task( + &chain, + block.clone(), + fixed_blob_sidecar_list.clone(), + publish_fn, + log.clone(), + ); + + Some(data_columns_receiver) + } else { + if !blobs_to_import_and_publish.is_empty() { + publish_fn(BlobsOrDataColumns::Blobs(blobs_to_import_and_publish)); + } + + None + }; + + debug!( + log, + "Processing engine blobs"; + "num_fetched_blobs" => num_fetched_blobs, + ); + + let availability_processing_status = chain + .process_engine_blobs( + block.slot(), + block_root, + fixed_blob_sidecar_list.clone(), + data_columns_receiver_opt, + ) + .await + .map_err(FetchEngineBlobError::BlobProcessingError)?; + + Ok(Some(availability_processing_status)) +} + +/// Spawn a blocking task here for long computation tasks, so it doesn't block processing, and it +/// allows blobs / data columns to propagate without waiting for processing. +/// +/// An `mpsc::Sender` is then used to send the produced data columns to the `beacon_chain` for it +/// to be persisted, **after** the block is made attestable. +/// +/// The reason for doing this is to make the block available and attestable as soon as possible, +/// while maintaining the invariant that block and data columns are persisted atomically. 
+fn spawn_compute_and_publish_data_columns_task( + chain: &Arc>, + block: Arc>>, + blobs: FixedBlobSidecarList, + publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, + log: Logger, +) -> Receiver>>> { + let chain_cloned = chain.clone(); + let (data_columns_sender, data_columns_receiver) = tokio::sync::mpsc::channel(1); + + chain.task_executor.spawn_blocking( + move || { + let mut timer = metrics::start_timer_vec( + &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, + &[&blobs.len().to_string()], + ); + let blob_refs = blobs + .iter() + .filter_map(|b| b.as_ref().map(|b| &b.blob)) + .collect::>(); + let data_columns_result = blobs_to_data_column_sidecars( + &blob_refs, + &block, + &chain_cloned.kzg, + &chain_cloned.spec, + ) + .discard_timer_on_break(&mut timer); + drop(timer); + + let all_data_columns = match data_columns_result { + Ok(d) => d, + Err(e) => { + error!( + log, + "Failed to build data column sidecars from blobs"; + "error" => ?e + ); + return; + } + }; + + if let Err(e) = data_columns_sender.try_send(all_data_columns.clone()) { + error!(log, "Failed to send computed data columns"; "error" => ?e); + }; + + // Check indices from cache before sending the columns, to make sure we don't + // publish components already seen on gossip. + let is_supernode = chain_cloned.data_availability_checker.is_supernode(); + + // At the moment non supernodes are not required to publish any columns. + // TODO(das): we could experiment with having full nodes publish their custodied + // columns here. 
+ if !is_supernode { + return; + } + + publish_fn(BlobsOrDataColumns::DataColumns(all_data_columns)); + }, + "compute_and_publish_data_columns", + ); + + data_columns_receiver +} + +fn build_blob_sidecars( + block: &Arc>>, + response: Vec>>, + signed_block_header: SignedBeaconBlockHeader, + kzg_commitments_inclusion_proof: &FixedVector, + spec: &ChainSpec, +) -> Result, FetchEngineBlobError> { + let epoch = block.epoch(); + let mut fixed_blob_sidecar_list = + FixedBlobSidecarList::default(spec.max_blobs_per_block(epoch) as usize); + for (index, blob_and_proof) in response + .into_iter() + .enumerate() + .filter_map(|(i, opt_blob)| Some((i, opt_blob?))) + { + match BlobSidecar::new_with_existing_proof( + index, + blob_and_proof.blob, + block, + signed_block_header.clone(), + kzg_commitments_inclusion_proof, + blob_and_proof.proof, + ) { + Ok(blob) => { + if let Some(blob_mut) = fixed_blob_sidecar_list.get_mut(index) { + *blob_mut = Some(Arc::new(blob)); + } else { + return Err(FetchEngineBlobError::InternalError(format!( + "Blobs from EL contains blob with invalid index {index}" + ))); + } + } + Err(e) => { + return Err(FetchEngineBlobError::BlobSidecarError(e)); + } + } + } + Ok(fixed_blob_sidecar_list) +} diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index a23b6ddc1e..ddae54f464 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,5 +1,5 @@ use crate::data_availability_checker::AvailableBlock; -use crate::{errors::BeaconChainError as Error, metrics, BeaconChain, BeaconChainTypes}; +use crate::{metrics, BeaconChain, BeaconChainTypes}; use itertools::Itertools; use slog::debug; use state_processing::{ @@ -10,7 +10,11 @@ use std::borrow::Cow; use std::iter; use std::time::Duration; use store::metadata::DataColumnInfo; -use store::{chunked_vector::BlockRoots, AnchorInfo, BlobInfo, ChunkWriter, KeyValueStore}; +use store::{ + 
get_key_for_col, AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, + KeyValueStoreOp, +}; +use strum::IntoStaticStr; use types::{FixedBytesExtended, Hash256, Slot}; /// Use a longer timeout on the pubkey cache. @@ -18,10 +22,8 @@ use types::{FixedBytesExtended, Hash256, Slot}; /// It's ok if historical sync is stalled due to writes from forwards block processing. const PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(30); -#[derive(Debug)] +#[derive(Debug, IntoStaticStr)] pub enum HistoricalBlockError { - /// Block is not available (only returned when fetching historic blocks). - BlockOutOfRange { slot: Slot, oldest_block_slot: Slot }, /// Block root mismatch, caller should retry with different blocks. MismatchedBlockRoot { block_root: Hash256, @@ -33,10 +35,16 @@ pub enum HistoricalBlockError { InvalidSignature, /// Transitory error, caller should retry with the same blocks. ValidatorPubkeyCacheTimeout, - /// No historical sync needed. - NoAnchorInfo, /// Logic error: should never occur. 
IndexOutOfBounds, + /// Internal store error + StoreError(StoreError), +} + +impl From for HistoricalBlockError { + fn from(e: StoreError) -> Self { + Self::StoreError(e) + } } impl BeaconChain { @@ -61,11 +69,8 @@ impl BeaconChain { pub fn import_historical_block_batch( &self, mut blocks: Vec>, - ) -> Result { - let anchor_info = self - .store - .get_anchor_info() - .ok_or(HistoricalBlockError::NoAnchorInfo)?; + ) -> Result { + let anchor_info = self.store.get_anchor_info(); let blob_info = self.store.get_blob_info(); let data_column_info = self.store.get_data_column_info(); @@ -109,8 +114,6 @@ impl BeaconChain { let mut expected_block_root = anchor_info.oldest_block_parent; let mut prev_block_slot = anchor_info.oldest_block_slot; - let mut chunk_writer = - ChunkWriter::::new(&self.store.cold_db, prev_block_slot.as_usize())?; let mut new_oldest_blob_slot = blob_info.oldest_blob_slot; let mut new_oldest_data_column_slot = data_column_info.oldest_data_column_slot; @@ -127,8 +130,7 @@ impl BeaconChain { return Err(HistoricalBlockError::MismatchedBlockRoot { block_root, expected_block_root, - } - .into()); + }); } let blinded_block = block.clone_as_blinded(); @@ -149,8 +151,11 @@ impl BeaconChain { } // Store block roots, including at all skip slots in the freezer DB. - for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() { - chunk_writer.set(slot, block_root, &mut cold_batch)?; + for slot in (block.slot().as_u64()..prev_block_slot.as_u64()).rev() { + cold_batch.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + block_root.as_slice().to_vec(), + )); } prev_block_slot = block.slot(); @@ -162,15 +167,17 @@ impl BeaconChain { // completion. 
if expected_block_root == self.genesis_block_root { let genesis_slot = self.spec.genesis_slot; - for slot in genesis_slot.as_usize()..prev_block_slot.as_usize() { - chunk_writer.set(slot, self.genesis_block_root, &mut cold_batch)?; + for slot in genesis_slot.as_u64()..prev_block_slot.as_u64() { + cold_batch.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + self.genesis_block_root.as_slice().to_vec(), + )); } prev_block_slot = genesis_slot; expected_block_root = Hash256::zero(); break; } } - chunk_writer.write(&mut cold_batch)?; // these were pushed in reverse order so we reverse again signed_blocks.reverse(); @@ -212,7 +219,7 @@ impl BeaconChain { let verify_timer = metrics::start_timer(&metrics::BACKFILL_SIGNATURE_VERIFY_TIMES); if !signature_set.verify() { - return Err(HistoricalBlockError::InvalidSignature.into()); + return Err(HistoricalBlockError::InvalidSignature); } drop(verify_timer); drop(sig_timer); @@ -262,7 +269,7 @@ impl BeaconChain { let backfill_complete = new_anchor.block_backfill_complete(self.genesis_backfill_slot); anchor_and_blob_batch.push( self.store - .compare_and_set_anchor_info(Some(anchor_info), Some(new_anchor))?, + .compare_and_set_anchor_info(anchor_info, new_anchor)?, ); self.store.hot_db.do_atomically(anchor_and_blob_batch)?; diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index f966040931..99db463228 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -7,8 +7,8 @@ use std::sync::Arc; use types::beacon_block_body::KzgCommitments; use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; use types::{ - Blob, BlobsList, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - Hash256, KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, + Blob, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, 
EthSpec, Hash256, + KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, }; /// Converts a blob ssz List object to an array to be used with the kzg @@ -146,7 +146,7 @@ pub fn verify_kzg_proof( /// Build data column sidecars from a signed beacon block and its blobs. pub fn blobs_to_data_column_sidecars( - blobs: &BlobsList, + blobs: &[&Blob], block: &SignedBeaconBlock, kzg: &Kzg, spec: &ChainSpec, @@ -154,6 +154,7 @@ pub fn blobs_to_data_column_sidecars( if blobs.is_empty() { return Ok(vec![]); } + let kzg_commitments = block .message() .body() @@ -314,19 +315,21 @@ mod test { #[track_caller] fn test_build_data_columns_empty(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 0; - let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); assert!(column_sidecars.is_empty()); } #[track_caller] fn test_build_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 6; - let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); let block_kzg_commitments = signed_block .message() @@ -360,9 +363,10 @@ mod test { #[track_caller] fn test_reconstruct_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 6; - let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, 
spec); + let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); // Now reconstruct let reconstructed_columns = reconstruct_data_columns( diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index b89c00e0af..d9728b9fd4 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -28,6 +28,7 @@ pub mod eth1_chain; mod eth1_finalization_cache; pub mod events; pub mod execution_payload; +pub mod fetch_blobs; pub mod fork_choice_signal; pub mod fork_revert; pub mod graffiti_calculator; @@ -43,10 +44,9 @@ mod naive_aggregation_pool; pub mod observed_aggregates; mod observed_attesters; pub mod observed_block_producers; -mod observed_data_sidecars; +pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; -pub mod otb_verification_service; mod persisted_beacon_chain; mod persisted_fork_choice; mod pre_finalization_cache; diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index ca015d0365..78442d8df0 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,25 +1,19 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; -use eth2::types::light_client_update::CurrentSyncCommitteeProofLen; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use slog::{debug, Logger}; use ssz::Decode; -use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use store::DBColumn; use store::KeyValueStore; use tree_hash::TreeHash; -use types::light_client_update::{ - FinalizedRootProofLen, NextSyncCommitteeProofLen, CURRENT_SYNC_COMMITTEE_INDEX, - FINALIZED_ROOT_INDEX, NEXT_SYNC_COMMITTEE_INDEX, -}; use 
types::non_zero_usize::new_non_zero_usize; use types::{ BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, EthSpec, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, - LightClientUpdate, Slot, SyncAggregate, SyncCommittee, + LightClientUpdate, MerkleProof, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the @@ -69,17 +63,14 @@ impl LightClientServerCache { block_post_state: &mut BeaconState, ) -> Result<(), BeaconChainError> { let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES); - + let fork_name = spec.fork_name_at_slot::(block.slot()); // Only post-altair - if spec.fork_name_at_slot::(block.slot()) == ForkName::Base { - return Ok(()); + if fork_name.altair_enabled() { + // Persist in memory cache for a descendent block + let cached_data = LightClientCachedData::from_state(block_post_state)?; + self.prev_block_cache.lock().put(block_root, cached_data); } - // Persist in memory cache for a descendent block - - let cached_data = LightClientCachedData::from_state(block_post_state)?; - self.prev_block_cache.lock().put(block_root, cached_data); - Ok(()) } @@ -94,6 +85,7 @@ impl LightClientServerCache { log: &Logger, chain_spec: &ChainSpec, ) -> Result<(), BeaconChainError> { + metrics::inc_counter(&metrics::LIGHT_CLIENT_SERVER_CACHE_PROCESSING_REQUESTS); let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_RECOMPUTE_UPDATES_TIMES); @@ -214,6 +206,7 @@ impl LightClientServerCache { *self.latest_light_client_update.write() = Some(new_light_client_update); } + metrics::inc_counter(&metrics::LIGHT_CLIENT_SERVER_CACHE_PROCESSING_SUCCESSES); Ok(()) } @@ -289,6 +282,11 @@ impl LightClientServerCache { let (sync_committee_bytes, light_client_update_bytes) = res?; let sync_committee_period = u64::from_ssz_bytes(&sync_committee_bytes) .map_err(store::errors::Error::SszDecodeError)?; + + if 
sync_committee_period >= start_period + count { + break; + } + let epoch = sync_committee_period .safe_mul(chain_spec.epochs_per_sync_committee_period.into())?; @@ -299,10 +297,6 @@ impl LightClientServerCache { .map_err(store::errors::Error::SszDecodeError)?; light_client_updates.push(light_client_update); - - if sync_committee_period >= start_period + count { - break; - } } Ok(light_client_updates) } @@ -413,16 +407,12 @@ impl Default for LightClientServerCache { } } -type FinalityBranch = FixedVector; -type NextSyncCommitteeBranch = FixedVector; -type CurrentSyncCommitteeBranch = FixedVector; - #[derive(Clone)] struct LightClientCachedData { finalized_checkpoint: Checkpoint, - finality_branch: FinalityBranch, - next_sync_committee_branch: NextSyncCommitteeBranch, - current_sync_committee_branch: CurrentSyncCommitteeBranch, + finality_branch: MerkleProof, + next_sync_committee_branch: MerkleProof, + current_sync_committee_branch: MerkleProof, next_sync_committee: Arc>, current_sync_committee: Arc>, finalized_block_root: Hash256, @@ -430,17 +420,18 @@ struct LightClientCachedData { impl LightClientCachedData { fn from_state(state: &mut BeaconState) -> Result { + let (finality_branch, next_sync_committee_branch, current_sync_committee_branch) = ( + state.compute_finalized_root_proof()?, + state.compute_current_sync_committee_proof()?, + state.compute_next_sync_committee_proof()?, + ); Ok(Self { finalized_checkpoint: state.finalized_checkpoint(), - finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), + finality_branch, next_sync_committee: state.next_sync_committee()?.clone(), current_sync_committee: state.current_sync_committee()?.clone(), - next_sync_committee_branch: state - .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? - .into(), - current_sync_committee_branch: state - .compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)? 
- .into(), + next_sync_committee_branch, + current_sync_committee_branch, finalized_block_root: state.finalized_checkpoint().root, }) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 0b5608f084..c6aa9fbcac 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -2,7 +2,7 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use bls::FixedBytesExtended; -pub use lighthouse_metrics::*; +pub use metrics::*; use slot_clock::SlotClock; use std::sync::LazyLock; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; @@ -111,6 +111,13 @@ pub static BLOCK_PROCESSING_POST_EXEC_PROCESSING: LazyLock> = linear_buckets(5e-3, 5e-3, 10), ) }); +pub static BLOCK_PROCESSING_DATA_COLUMNS_WAIT: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_block_processing_data_columns_wait_seconds", + "Time spent waiting for data columns to be computed before starting database write", + exponential_buckets(0.01, 2.0, 10), + ) +}); pub static BLOCK_PROCESSING_DB_WRITE: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_block_processing_db_write_seconds", @@ -1691,6 +1698,34 @@ pub static DATA_COLUMNS_SIDECAR_PROCESSING_SUCCESSES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_blobs_from_el_hit_total", + "Number of blob batches fetched from the execution layer", + ) +}); + +pub static BLOBS_FROM_EL_MISS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_blobs_from_el_miss_total", + "Number of blob batches failed to fetch from the execution layer", + ) +}); + +pub static BLOBS_FROM_EL_EXPECTED_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_blobs_from_el_expected_total", + "Number of blobs expected from the execution layer", + ) +}); + +pub static 
BLOBS_FROM_EL_RECEIVED_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_blobs_from_el_received_total", + "Number of blobs fetched from the execution layer", + ) +}); + /* * Light server message verification */ @@ -1937,6 +1972,22 @@ pub static LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_light_client_server_cache_processing_requests", + "Count of all requests to recompute and cache updates", + ) + }); + +pub static LIGHT_CLIENT_SERVER_CACHE_PROCESSING_SUCCESSES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_light_client_server_cache_processing_successes", + "Count of all successful requests to recompute and cache updates", + ) + }); + /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, /// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`. pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { @@ -1953,6 +2004,7 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { let attestation_stats = beacon_chain.op_pool.attestation_stats(); let chain_metrics = beacon_chain.metrics(); + // Kept duplicated for backwards compatibility set_gauge_by_usize( &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, beacon_chain.store.state_cache_len(), @@ -2016,6 +2068,8 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { .canonical_head .fork_choice_read_lock() .scrape_for_metrics(); + + beacon_chain.store.register_metrics(); } /// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`. 
diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index f83df7b446..bc4b8e1ed8 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -24,6 +24,12 @@ const MAX_COMPACTION_PERIOD_SECONDS: u64 = 604800; const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200; /// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`. const COMPACTION_FINALITY_DISTANCE: u64 = 1024; +/// Maximum number of blocks applied in each reconstruction burst. +/// +/// This limits the amount of time that the finalization migration is paused for. We set this +/// conservatively because pausing the finalization migration for too long can cause hot state +/// cache misses and excessive disk use. +const BLOCKS_PER_RECONSTRUCTION: usize = 1024; /// Default number of epochs to wait between finalization migrations. pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1; @@ -188,7 +194,9 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator>, log: &Logger) { - if let Err(e) = db.reconstruct_historic_states() { - error!( - log, - "State reconstruction failed"; - "error" => ?e, - ); + pub fn run_reconstruction( + db: Arc>, + opt_tx: Option>, + log: &Logger, + ) { + match db.reconstruct_historic_states(Some(BLOCKS_PER_RECONSTRUCTION)) { + Ok(()) => { + // Schedule another reconstruction batch if required and we have access to the + // channel for requeueing. 
+ if let Some(tx) = opt_tx { + if !db.get_anchor_info().all_historic_states_stored() { + if let Err(e) = tx.send(Notification::Reconstruction) { + error!( + log, + "Unable to requeue reconstruction notification"; + "error" => ?e + ); + } + } + } + } + Err(e) => { + error!( + log, + "State reconstruction failed"; + "error" => ?e, + ); + } } } @@ -388,6 +417,7 @@ impl, Cold: ItemStore> BackgroundMigrator (mpsc::Sender, thread::JoinHandle<()>) { let (tx, rx) = mpsc::channel(); + let inner_tx = tx.clone(); let thread = thread::spawn(move || { while let Ok(notif) = rx.recv() { let mut reconstruction_notif = None; @@ -418,16 +448,17 @@ impl, Cold: ItemStore> BackgroundMigrator Result; } -impl<'a, E: EthSpec> SubsetItem for AttestationRef<'a, E> { +impl SubsetItem for AttestationRef<'_, E> { type Item = BitList; fn is_subset(&self, other: &Self::Item) -> bool { match self { @@ -159,7 +159,7 @@ impl<'a, E: EthSpec> SubsetItem for AttestationRef<'a, E> { } } -impl<'a, E: EthSpec> SubsetItem for &'a SyncCommitteeContribution { +impl SubsetItem for &SyncCommitteeContribution { type Item = BitVector; fn is_subset(&self, other: &Self::Item) -> bool { self.aggregation_bits.is_subset(other) diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index 2720090778..a9ab8f4e10 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -150,6 +150,31 @@ impl ObservedDataSidecars { } } +/// Abstraction to control "observation" of gossip messages (currently just blobs and data columns). +/// +/// If a type returns `false` for `observe` then the message will not be immediately added to its +/// respective gossip observation cache. Unobserved messages should usually be observed later. +pub trait ObservationStrategy { + fn observe() -> bool; +} + +/// Type for messages that are observed immediately. 
+pub struct Observe; +/// Type for messages that have not been observed. +pub struct DoNotObserve; + +impl ObservationStrategy for Observe { + fn observe() -> bool { + true + } +} + +impl ObservationStrategy for DoNotObserve { + fn observe() -> bool { + false + } +} + #[cfg(test)] mod tests { use super::*; @@ -310,7 +335,7 @@ mod tests { #[test] fn simple_observations() { let spec = Arc::new(test_spec::()); - let mut cache = ObservedDataSidecars::>::new(spec); + let mut cache = ObservedDataSidecars::>::new(spec.clone()); // Slot 0, index 0 let proposer_index_a = 420; diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs deleted file mode 100644 index 31034a7d59..0000000000 --- a/beacon_node/beacon_chain/src/otb_verification_service.rs +++ /dev/null @@ -1,381 +0,0 @@ -use crate::execution_payload::{validate_merge_block, AllowOptimisticImport}; -use crate::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, -}; -use itertools::process_results; -use proto_array::InvalidationOperation; -use slog::{crit, debug, error, info, warn}; -use slot_clock::SlotClock; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_block_processing::is_merge_transition_complete; -use std::sync::Arc; -use store::{DBColumn, Error as StoreError, HotColdDB, KeyValueStore, StoreItem}; -use task_executor::{ShutdownReason, TaskExecutor}; -use tokio::time::sleep; -use tree_hash::TreeHash; -use types::{BeaconBlockRef, EthSpec, Hash256, Slot}; -use DBColumn::OptimisticTransitionBlock as OTBColumn; - -#[derive(Clone, Debug, Decode, Encode, PartialEq)] -pub struct OptimisticTransitionBlock { - root: Hash256, - slot: Slot, -} - -impl OptimisticTransitionBlock { - // types::BeaconBlockRef<'_, ::EthSpec> - pub fn from_block(block: BeaconBlockRef) -> Self { - Self { - root: block.tree_hash_root(), - 
slot: block.slot(), - } - } - - pub fn root(&self) -> &Hash256 { - &self.root - } - - pub fn slot(&self) -> &Slot { - &self.slot - } - - pub fn persist_in_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - if store - .as_ref() - .item_exists::(&self.root)? - { - Ok(()) - } else { - store.as_ref().put_item(&self.root, self) - } - } - - pub fn remove_from_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - store - .as_ref() - .hot_db - .key_delete(OTBColumn.into(), self.root.as_slice()) - } - - fn is_canonical( - &self, - chain: &BeaconChain, - ) -> Result { - Ok(chain - .forwards_iter_block_roots_until(self.slot, self.slot)? - .next() - .transpose()? - .map(|(root, _)| root) - == Some(self.root)) - } -} - -impl StoreItem for OptimisticTransitionBlock { - fn db_column() -> DBColumn { - OTBColumn - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) - } -} - -/// The routine is expected to run once per epoch, 1/4th through the epoch. -pub const EPOCH_DELAY_FACTOR: u32 = 4; - -/// Spawns a routine which checks the validity of any optimistically imported transition blocks -/// -/// This routine will run once per epoch, at `epoch_duration / EPOCH_DELAY_FACTOR` after -/// the start of each epoch. -/// -/// The service will not be started if there is no `execution_layer` on the `chain`. -pub fn start_otb_verification_service( - executor: TaskExecutor, - chain: Arc>, -) { - // Avoid spawning the service if there's no EL, it'll just error anyway. 
- if chain.execution_layer.is_some() { - executor.spawn( - async move { otb_verification_service(chain).await }, - "otb_verification_service", - ); - } -} - -pub fn load_optimistic_transition_blocks( - chain: &BeaconChain, -) -> Result, StoreError> { - process_results( - chain.store.hot_db.iter_column::(OTBColumn), - |iter| { - iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) - .collect() - }, - )? -} - -#[derive(Debug)] -pub enum Error { - ForkChoice(String), - BeaconChain(BeaconChainError), - StoreError(StoreError), - NoBlockFound(OptimisticTransitionBlock), -} - -pub async fn validate_optimistic_transition_blocks( - chain: &Arc>, - otbs: Vec, -) -> Result<(), Error> { - let finalized_slot = chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_err(|e| Error::ForkChoice(format!("{:?}", e)))? - .slot; - - // separate otbs into - // non-canonical - // finalized canonical - // unfinalized canonical - let mut non_canonical_otbs = vec![]; - let (finalized_canonical_otbs, unfinalized_canonical_otbs) = process_results( - otbs.into_iter().map(|otb| { - otb.is_canonical(chain) - .map(|is_canonical| (otb, is_canonical)) - }), - |pair_iter| { - pair_iter - .filter_map(|(otb, is_canonical)| { - if is_canonical { - Some(otb) - } else { - non_canonical_otbs.push(otb); - None - } - }) - .partition::, _>(|otb| *otb.slot() <= finalized_slot) - }, - ) - .map_err(Error::BeaconChain)?; - - // remove non-canonical blocks that conflict with finalized checkpoint from the database - for otb in non_canonical_otbs { - if *otb.slot() <= finalized_slot { - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - } - } - - // ensure finalized canonical otb are valid, otherwise kill client - for otb in finalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block 
is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - chain.log, - "Validated merge transition block"; - "block_root" => ?otb.root(), - "type" => "finalized" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, - )) => { - // Finalized Merge Transition Block is Invalid! Kill the Client! - crit!( - chain.log, - "Finalized merge transition block is invalid!"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ - You may be on a hostile network.", - "block_hash" => ?block.canonical_root() - ); - let mut shutdown_sender = chain.shutdown_sender(); - if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, - )) { - crit!( - chain.log, - "Failed to shut down client"; - "error" => ?e, - "shutdown_reason" => INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON - ); - } - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. 
- Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - // attempt to validate any non-finalized canonical otb blocks - for otb in unfinalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - chain.log, - "Validated merge transition block"; - "block_root" => ?otb.root(), - "type" => "not finalized" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, - )) => { - // Unfinalized Merge Transition Block is Invalid -> Run process_invalid_execution_payload - warn!( - chain.log, - "Merge transition block invalid"; - "block_root" => ?otb.root() - ); - chain - .process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: *otb.root(), - }, - ) - .await - .map_err(|e| { - warn!( - chain.log, - "Error checking merge transition block"; - "error" => ?e, - "location" => "process_invalid_execution_payload" - ); - Error::BeaconChain(e) - })?; - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. 
- Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - Ok(()) -} - -/// Loop until any optimistically imported merge transition blocks have been verified and -/// the merge has been finalized. -async fn otb_verification_service(chain: Arc>) { - let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; - loop { - match chain - .slot_clock - .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) - { - Some(duration) => { - let additional_delay = epoch_duration / EPOCH_DELAY_FACTOR; - sleep(duration + additional_delay).await; - - debug!( - chain.log, - "OTB verification service firing"; - ); - - if !is_merge_transition_complete( - &chain.canonical_head.cached_head().snapshot.beacon_state, - ) { - // We are pre-merge. Nothing to do yet. - continue; - } - - // load all optimistically imported transition blocks from the database - match load_optimistic_transition_blocks(chain.as_ref()) { - Ok(otbs) => { - if otbs.is_empty() { - if chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_or(false, |block| { - block.execution_status.is_execution_enabled() - }) - { - // there are no optimistic blocks in the database, we can exit - // the service since the merge transition is finalized and we'll - // never see another transition block - break; - } else { - debug!( - chain.log, - "No optimistic transition blocks"; - "info" => "waiting for the merge transition to finalize" - ) - } - } - if let Err(e) = validate_optimistic_transition_blocks(&chain, otbs).await { - warn!( - chain.log, - "Error while validating optimistic transition blocks"; - "error" => ?e - ); - } - } - Err(e) => { - error!( - chain.log, - "Error loading optimistic transition blocks"; - "error" => ?e - ); - } - }; - } - None => { - error!(chain.log, "Failed to read slot clock"); - // If we can't read the slot clock, just wait another slot. 
- sleep(chain.slot_clock.slot_duration()).await; - } - }; - } - debug!( - chain.log, - "No optimistic transition blocks in database"; - "msg" => "shutting down OTB verification service" - ); -} diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 4f7770e22c..9504901229 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,24 +1,23 @@ //! Utilities for managing database schema changes. mod migration_schema_v20; mod migration_schema_v21; +mod migration_schema_v22; use crate::beacon_chain::BeaconChainTypes; -use crate::types::ChainSpec; use slog::Logger; use std::sync::Arc; use store::hot_cold_store::{HotColdDB, HotColdDBError}; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::Error as StoreError; +use types::Hash256; /// Migrate the database from one schema version to another, applying all requisite mutations. -#[allow(clippy::only_used_in_recursion)] // spec is not used but likely to be used in future pub fn migrate_schema( db: Arc>, - deposit_contract_deploy_block: u64, + genesis_state_root: Option, from: SchemaVersion, to: SchemaVersion, log: Logger, - spec: &ChainSpec, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to itself is always OK, a no-op. @@ -26,28 +25,14 @@ pub fn migrate_schema( // Upgrade across multiple versions by recursively migrating one step at a time. (_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::( - db.clone(), - deposit_contract_deploy_block, - from, - next, - log.clone(), - spec, - )?; - migrate_schema::(db, deposit_contract_deploy_block, next, to, log, spec) + migrate_schema::(db.clone(), genesis_state_root, from, next, log.clone())?; + migrate_schema::(db, genesis_state_root, next, to, log) } // Downgrade across multiple versions by recursively migrating one step at a time. 
(_, _) if to.as_u64() + 1 < from.as_u64() => { let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::( - db.clone(), - deposit_contract_deploy_block, - from, - next, - log.clone(), - spec, - )?; - migrate_schema::(db, deposit_contract_deploy_block, next, to, log, spec) + migrate_schema::(db.clone(), genesis_state_root, from, next, log.clone())?; + migrate_schema::(db, genesis_state_root, next, to, log) } // @@ -69,6 +54,11 @@ pub fn migrate_schema( let ops = migration_schema_v21::downgrade_from_v21::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(21), SchemaVersion(22)) => { + // This migration needs to sync data between hot and cold DBs. The schema version is + // bumped inside the upgrade_to_v22 fn + migration_schema_v22::upgrade_to_v22::(db.clone(), genesis_state_root, log) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs new file mode 100644 index 0000000000..c34512eded --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs @@ -0,0 +1,212 @@ +use crate::beacon_chain::BeaconChainTypes; +use slog::{info, Logger}; +use std::sync::Arc; +use store::chunked_iter::ChunkedVectorIter; +use store::{ + chunked_vector::BlockRootsChunked, + get_key_for_col, + metadata::{ + SchemaVersion, ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_UNINITIALIZED, STATE_UPPER_LIMIT_NO_RETAIN, + }, + partial_beacon_state::PartialBeaconState, + AnchorInfo, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, +}; +use types::{BeaconState, Hash256, Slot}; + +const LOG_EVERY: usize = 200_000; + +fn load_old_schema_frozen_state( + db: &HotColdDB, + state_root: Hash256, +) -> Result>, Error> { + let Some(partial_state_bytes) = db + .cold_db + .get_bytes(DBColumn::BeaconState.into(), state_root.as_slice())? 
+ else { + return Ok(None); + }; + let mut partial_state: PartialBeaconState = + PartialBeaconState::from_ssz_bytes(&partial_state_bytes, db.get_chain_spec())?; + + // Fill in the fields of the partial state. + partial_state.load_block_roots(&db.cold_db, db.get_chain_spec())?; + partial_state.load_state_roots(&db.cold_db, db.get_chain_spec())?; + partial_state.load_historical_roots(&db.cold_db, db.get_chain_spec())?; + partial_state.load_randao_mixes(&db.cold_db, db.get_chain_spec())?; + partial_state.load_historical_summaries(&db.cold_db, db.get_chain_spec())?; + + partial_state.try_into().map(Some) +} + +pub fn upgrade_to_v22( + db: Arc>, + genesis_state_root: Option, + log: Logger, +) -> Result<(), Error> { + info!(log, "Upgrading from v21 to v22"); + + let old_anchor = db.get_anchor_info(); + + // If the anchor was uninitialized in the old schema (`None`), this represents a full archive + // node. + let effective_anchor = if old_anchor == ANCHOR_UNINITIALIZED { + ANCHOR_FOR_ARCHIVE_NODE + } else { + old_anchor.clone() + }; + + let split_slot = db.get_split_slot(); + let genesis_state_root = genesis_state_root.ok_or(Error::GenesisStateUnknown)?; + + let mut cold_ops = vec![]; + + // Load the genesis state in the previous chunked format, BEFORE we go deleting or rewriting + // anything. + let mut genesis_state = load_old_schema_frozen_state::(&db, genesis_state_root)? + .ok_or(Error::MissingGenesisState)?; + let genesis_state_root = genesis_state.update_tree_hash_cache()?; + let genesis_block_root = genesis_state.get_latest_block_root(genesis_state_root); + + // Store the genesis state in the new format, prior to updating the schema version on disk. + // In case of a crash no data is lost because we will re-load it in the old format and re-do + // this write. 
+ if split_slot > 0 { + info!( + log, + "Re-storing genesis state"; + "state_root" => ?genesis_state_root, + ); + db.store_cold_state(&genesis_state_root, &genesis_state, &mut cold_ops)?; + } + + // Write the block roots in the new format in a new column. Similar to above, we do this + // separately from deleting the old format block roots so that this is crash safe. + let oldest_block_slot = effective_anchor.oldest_block_slot; + write_new_schema_block_roots::( + &db, + genesis_block_root, + oldest_block_slot, + split_slot, + &mut cold_ops, + &log, + )?; + + // Commit this first batch of non-destructive cold database ops. + db.cold_db.do_atomically(cold_ops)?; + + // Now we update the anchor and the schema version atomically in the hot database. + // + // If we crash after commiting this change, then there will be some leftover cruft left in the + // freezer database, but no corruption because all the new-format data has already been written + // above. + let new_anchor = AnchorInfo { + state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, + state_lower_limit: Slot::new(0), + ..effective_anchor.clone() + }; + let hot_ops = vec![db.compare_and_set_anchor_info(old_anchor, new_anchor)?]; + db.store_schema_version_atomically(SchemaVersion(22), hot_ops)?; + + // Finally, clean up the old-format data from the freezer database. + delete_old_schema_freezer_data::(&db, &log)?; + + Ok(()) +} + +pub fn delete_old_schema_freezer_data( + db: &Arc>, + log: &Logger, +) -> Result<(), Error> { + let mut cold_ops = vec![]; + + let columns = [ + DBColumn::BeaconState, + // Cold state summaries indexed by state root were stored in this column. + DBColumn::BeaconStateSummary, + // Mapping from restore point number to state root was stored in this column. + DBColumn::BeaconRestorePoint, + // Chunked vector values were stored in these columns. 
+ DBColumn::BeaconHistoricalRoots, + DBColumn::BeaconRandaoMixes, + DBColumn::BeaconHistoricalSummaries, + DBColumn::BeaconBlockRootsChunked, + DBColumn::BeaconStateRootsChunked, + ]; + + for column in columns { + for res in db.cold_db.iter_column_keys::>(column) { + let key = res?; + cold_ops.push(KeyValueStoreOp::DeleteKey(get_key_for_col( + column.as_str(), + &key, + ))); + } + } + let delete_ops = cold_ops.len(); + + info!( + log, + "Deleting historic states"; + "delete_ops" => delete_ops, + ); + db.cold_db.do_atomically(cold_ops)?; + + // In order to reclaim space, we need to compact the freezer DB as well. + db.compact_freezer()?; + + Ok(()) +} + +pub fn write_new_schema_block_roots( + db: &HotColdDB, + genesis_block_root: Hash256, + oldest_block_slot: Slot, + split_slot: Slot, + cold_ops: &mut Vec, + log: &Logger, +) -> Result<(), Error> { + info!( + log, + "Starting beacon block root migration"; + "oldest_block_slot" => oldest_block_slot, + "genesis_block_root" => ?genesis_block_root, + ); + + // Store the genesis block root if it would otherwise not be stored. + if oldest_block_slot != 0 { + cold_ops.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col(DBColumn::BeaconBlockRoots.into(), &0u64.to_be_bytes()), + genesis_block_root.as_slice().to_vec(), + )); + } + + // Block roots are available from the `oldest_block_slot` to the `split_slot`. + let start_vindex = oldest_block_slot.as_usize(); + let block_root_iter = ChunkedVectorIter::::new( + db, + start_vindex, + split_slot, + db.get_chain_spec(), + ); + + // OK to hold these in memory (10M slots * 43 bytes per KV ~= 430 MB). 
+ for (i, (slot, block_root)) in block_root_iter.enumerate() { + cold_ops.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col( + DBColumn::BeaconBlockRoots.into(), + &(slot as u64).to_be_bytes(), + ), + block_root.as_slice().to_vec(), + )); + + if i > 0 && i % LOG_EVERY == 0 { + info!( + log, + "Beacon block root migration in progress"; + "roots_migrated" => i + ); + } + } + + Ok(()) +} diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index a662cc49c9..da1d60db17 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -512,7 +512,7 @@ mod test { } assert!( - !cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0), + !cache.contains(&shuffling_id_and_committee_caches.first().unwrap().0), "should not contain oldest epoch shuffling id" ); assert_eq!( diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index cb2b4428a1..a792f5f404 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2896,7 +2896,6 @@ pub fn generate_rand_block_and_blobs( (block, blob_sidecars) } -#[allow(clippy::type_complexity)] pub fn generate_rand_block_and_data_columns( fork_name: ForkName, num_blobs: NumBlobs, @@ -2904,12 +2903,12 @@ pub fn generate_rand_block_and_data_columns( spec: &ChainSpec, ) -> ( SignedBeaconBlock>, - Vec>>, + DataColumnSidecarList, ) { let kzg = get_kzg(spec); let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng); - let blob: BlobsList = blobs.into_iter().map(|b| b.blob).collect::>().into(); - let data_columns = blobs_to_data_column_sidecars(&blob, &block, &kzg, spec).unwrap(); + let blob_refs = blobs.iter().map(|b| &b.blob).collect::>(); + let data_columns = blobs_to_data_column_sidecars(&blob_refs, &block, &kzg, spec).unwrap(); (block, data_columns) } diff --git 
a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index e1f2cbb284..87fefe7114 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -70,12 +70,12 @@ async fn produces_attestations_from_attestation_simulator_service() { } // Compare the prometheus metrics that evaluates the performance of the unaggregated attestations - let hit_prometheus_metrics = vec![ + let hit_prometheus_metrics = [ metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, ]; - let miss_prometheus_metrics = vec![ + let miss_prometheus_metrics = [ metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, @@ -86,7 +86,7 @@ async fn produces_attestations_from_attestation_simulator_service() { let expected_miss_metrics_count = 0; let expected_hit_metrics_count = num_blocks_produced - UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64; - lighthouse_metrics::gather().iter().for_each(|mf| { + metrics::gather().iter().for_each(|mf| { if hit_prometheus_metrics.contains(&mf.get_name()) { assert_eq!( mf.get_metric()[0].get_counter().get_value() as u64, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index e168cbb6f4..dcc63ddf62 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -431,10 +431,12 @@ impl GossipTester { .chain .verify_aggregated_attestation_for_gossip(&aggregate) .err() - .expect(&format!( - "{} should error during 
verify_aggregated_attestation_for_gossip", - desc - )); + .unwrap_or_else(|| { + panic!( + "{} should error during verify_aggregated_attestation_for_gossip", + desc + ) + }); inspect_err(&self, err); /* @@ -449,10 +451,12 @@ impl GossipTester { .unwrap(); assert_eq!(results.len(), 2); - let batch_err = results.pop().unwrap().err().expect(&format!( - "{} should error during batch_verify_aggregated_attestations_for_gossip", - desc - )); + let batch_err = results.pop().unwrap().err().unwrap_or_else(|| { + panic!( + "{} should error during batch_verify_aggregated_attestations_for_gossip", + desc + ) + }); inspect_err(&self, batch_err); self @@ -475,10 +479,12 @@ impl GossipTester { .chain .verify_unaggregated_attestation_for_gossip(&attn, Some(subnet_id)) .err() - .expect(&format!( - "{} should error during verify_unaggregated_attestation_for_gossip", - desc - )); + .unwrap_or_else(|| { + panic!( + "{} should error during verify_unaggregated_attestation_for_gossip", + desc + ) + }); inspect_err(&self, err); /* @@ -496,10 +502,12 @@ impl GossipTester { ) .unwrap(); assert_eq!(results.len(), 2); - let batch_err = results.pop().unwrap().err().expect(&format!( - "{} should error during batch_verify_unaggregated_attestations_for_gossip", - desc - )); + let batch_err = results.pop().unwrap().err().unwrap_or_else(|| { + panic!( + "{} should error during batch_verify_unaggregated_attestations_for_gossip", + desc + ) + }); inspect_err(&self, batch_err); self @@ -816,7 +824,7 @@ async fn aggregated_gossip_verification() { let (index, sk) = tester.non_aggregator(); *a = SignedAggregateAndProof::from_aggregate( index as u64, - tester.valid_aggregate.message().aggregate().clone(), + tester.valid_aggregate.message().aggregate(), None, &sk, &chain.canonical_head.cached_head().head_fork(), diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs b/beacon_node/beacon_chain/tests/bellatrix.rs index 5bd3452623..5080b0890b 100644 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ 
b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -82,7 +82,7 @@ async fn merge_with_terminal_block_hash_override() { let block = &harness.chain.head_snapshot().beacon_block; - let execution_payload = block.message().body().execution_payload().unwrap().clone(); + let execution_payload = block.message().body().execution_payload().unwrap(); if i == 0 { assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); } @@ -133,7 +133,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { * Do the Bellatrix fork, without a terminal PoW block. */ - harness.extend_to_slot(bellatrix_fork_slot).await; + Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; let bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!(bellatrix_head.as_bellatrix().is_ok()); @@ -207,15 +207,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - execution_payloads.push( - block - .message() - .body() - .execution_payload() - .unwrap() - .clone() - .into(), - ); + execution_payloads.push(block.message().body().execution_payload().unwrap().into()); } verify_execution_payload_chain(execution_payloads.as_slice()); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 223249ccd4..078a5d848c 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -976,7 +976,7 @@ async fn block_gossip_verification() { harness .chain - .process_gossip_blob(gossip_verified, || Ok(())) + .process_gossip_blob(gossip_verified) .await .expect("should import valid gossip verified blob"); } @@ -1247,7 +1247,7 @@ async fn verify_block_for_gossip_slashing_detection() { .unwrap(); harness .chain - .process_gossip_blob(verified_blob, || Ok(())) + .process_gossip_blob(verified_blob) .await .unwrap(); } @@ -1726,7 +1726,7 @@ async fn 
import_execution_pending_block( .unwrap() { ExecutedBlock::Available(block) => chain - .import_available_block(Box::from(block)) + .import_available_block(Box::from(block), None) .await .map_err(|e| format!("{e:?}")), ExecutedBlock::AvailabilityPending(_) => { diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index ac97a95721..3ce5702f2e 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -54,7 +54,7 @@ async fn base_altair_bellatrix_capella() { /* * Do the Altair fork. */ - harness.extend_to_slot(altair_fork_slot).await; + Box::pin(harness.extend_to_slot(altair_fork_slot)).await; let altair_head = &harness.chain.head_snapshot().beacon_block; assert!(altair_head.as_altair().is_ok()); @@ -63,7 +63,7 @@ async fn base_altair_bellatrix_capella() { /* * Do the Bellatrix fork, without a terminal PoW block. */ - harness.extend_to_slot(bellatrix_fork_slot).await; + Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; let bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!(bellatrix_head.as_bellatrix().is_ok()); @@ -81,7 +81,7 @@ async fn base_altair_bellatrix_capella() { /* * Next Bellatrix block shouldn't include an exec payload. 
*/ - harness.extend_slots(1).await; + Box::pin(harness.extend_slots(1)).await; let one_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( @@ -112,7 +112,7 @@ async fn base_altair_bellatrix_capella() { terminal_block.timestamp = timestamp; } }); - harness.extend_slots(1).await; + Box::pin(harness.extend_slots(1)).await; let two_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 0eb63a0c7f..c9bd55e062 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -35,7 +35,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { let _ = harness .chain - .process_gossip_blob(gossip_verified_blob, || Ok(())) + .process_gossip_blob(gossip_verified_blob) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 1325875a27..01b790bb25 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -413,7 +413,7 @@ async fn invalid_payload_invalidates_parent() { rig.import_block(Payload::Valid).await; // Import a valid transition block. rig.move_to_first_justification(Payload::Syncing).await; - let roots = vec![ + let roots = [ rig.import_block(Payload::Syncing).await, rig.import_block(Payload::Syncing).await, rig.import_block(Payload::Syncing).await, @@ -986,10 +986,13 @@ async fn payload_preparation() { // Provide preparation data to the EL for `proposer`. 
el.update_proposer_preparation( Epoch::new(1), - &[ProposerPreparationData { - validator_index: proposer as u64, - fee_recipient, - }], + [( + &ProposerPreparationData { + validator_index: proposer as u64, + fee_recipient, + }, + &None, + )], ) .await; @@ -1049,7 +1052,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for gossip. assert!(matches!( - rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await, + rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1119,10 +1122,13 @@ async fn payload_preparation_before_transition_block() { // Provide preparation data to the EL for `proposer`. el.update_proposer_preparation( Epoch::new(0), - &[ProposerPreparationData { - validator_index: proposer as u64, - fee_recipient, - }], + [( + &ProposerPreparationData { + validator_index: proposer as u64, + fee_recipient, + }, + &None, + )], ) .await; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 1a6b444319..e1258ccdea 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -25,13 +25,10 @@ use std::collections::HashSet; use std::convert::TryInto; use std::sync::{Arc, LazyLock}; use std::time::Duration; -use store::chunked_vector::Chunk; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION, STATE_UPPER_LIMIT_NO_RETAIN}; use store::{ - chunked_vector::{chunk_key, Field}, - get_key_for_col, iter::{BlockRootsIterator, StateRootsIterator}, - BlobInfo, DBColumn, HotColdDB, KeyValueStore, KeyValueStoreOp, LevelDB, StoreConfig, + BlobInfo, DBColumn, HotColdDB, LevelDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; use tokio::time::sleep; @@ -58,8 +55,8 @@ fn get_store_generic( config: StoreConfig, spec: ChainSpec, ) -> Arc, LevelDB>> { - let hot_path = 
db_path.path().join("hot_db"); - let cold_path = db_path.path().join("cold_db"); + let hot_path = db_path.path().join("chain_db"); + let cold_path = db_path.path().join("freezer_db"); let blobs_path = db_path.path().join("blobs_db"); let log = test_logger(); @@ -112,19 +109,8 @@ async fn light_client_bootstrap_test() { return; }; - let checkpoint_slot = Slot::new(E::slots_per_epoch() * 6); let db_path = tempdir().unwrap(); - let log = test_logger(); - - let seconds_per_slot = spec.seconds_per_slot; - let store = get_store_generic( - &db_path, - StoreConfig { - slots_per_restore_point: 2 * E::slots_per_epoch(), - ..Default::default() - }, - test_spec::(), - ); + let store = get_store_generic(&db_path, StoreConfig::default(), spec.clone()); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); let num_initial_slots = E::slots_per_epoch() * 7; @@ -142,78 +128,8 @@ async fn light_client_bootstrap_test() { ) .await; - let wss_block_root = harness + let finalized_checkpoint = harness .chain - .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness - .chain - .store - .get_full_block(&wss_block_root) - .unwrap() - .unwrap(); - let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); - let wss_state = store - .get_state(&wss_state_root, Some(checkpoint_slot)) - .unwrap() - .unwrap(); - - let kzg = get_kzg(&spec); - - let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); - - // Initialise a new beacon chain from the finalized checkpoint. - // The slot clock must be set to a time ahead of the checkpoint state. 
- let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(seconds_per_slot), - ); - slot_clock.set_slot(harness.get_current_slot().as_u64()); - - let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) - .store(store.clone()) - .custom_spec(test_spec::().into()) - .task_executor(harness.chain.task_executor.clone()) - .logger(log.clone()) - .weak_subjectivity_state( - wss_state, - wss_block.clone(), - wss_blobs_opt.clone(), - genesis_state, - ) - .unwrap() - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .slot_clock(slot_clock) - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .execution_layer(Some(mock.el)) - .build() - .expect("should build"); - - let current_state = harness.get_current_state(); - - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - - let finalized_checkpoint = beacon_chain .canonical_head .cached_head() .finalized_checkpoint(); @@ -248,19 +164,8 @@ async fn light_client_updates_test() { }; let num_final_blocks = E::slots_per_epoch() * 2; - let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); let db_path = tempdir().unwrap(); - let log = test_logger(); - - let seconds_per_slot = spec.seconds_per_slot; - let store = get_store_generic( - &db_path, - StoreConfig { - slots_per_restore_point: 2 * E::slots_per_epoch(), - ..Default::default() - }, - test_spec::(), - ); + let store = get_store_generic(&db_path, StoreConfig::default(), test_spec::()); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); let num_initial_slots = E::slots_per_epoch() * 
10; @@ -276,33 +181,6 @@ async fn light_client_updates_test() { ) .await; - let wss_block_root = harness - .chain - .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness - .chain - .store - .get_full_block(&wss_block_root) - .unwrap() - .unwrap(); - let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); - let wss_state = store - .get_state(&wss_state_root, Some(checkpoint_slot)) - .unwrap() - .unwrap(); - - let kzg = get_kzg(&spec); - - let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); - harness.advance_slot(); harness .extend_chain_with_light_client_data( @@ -312,52 +190,8 @@ async fn light_client_updates_test() { ) .await; - // Initialise a new beacon chain from the finalized checkpoint. - // The slot clock must be set to a time ahead of the checkpoint state. 
- let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(seconds_per_slot), - ); - slot_clock.set_slot(harness.get_current_slot().as_u64()); - - let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) - .store(store.clone()) - .custom_spec(test_spec::().into()) - .task_executor(harness.chain.task_executor.clone()) - .logger(log.clone()) - .weak_subjectivity_state( - wss_state, - wss_block.clone(), - wss_blobs_opt.clone(), - genesis_state, - ) - .unwrap() - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .slot_clock(slot_clock) - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .execution_layer(Some(mock.el)) - .build() - .expect("should build"); - - let beacon_chain = Arc::new(beacon_chain); - let current_state = harness.get_current_state(); - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - // calculate the sync period from the previous slot let sync_period = (current_state.slot() - Slot::new(1)) .epoch(E::slots_per_epoch()) @@ -366,7 +200,8 @@ async fn light_client_updates_test() { // fetch a range of light client updates. right now there should only be one light client update // in the db. 
- let lc_updates = beacon_chain + let lc_updates = harness + .chain .get_light_client_updates(sync_period, 100) .unwrap(); @@ -386,260 +221,14 @@ async fn light_client_updates_test() { .await; // we should now have two light client updates in the db - let lc_updates = beacon_chain + let lc_updates = harness + .chain .get_light_client_updates(sync_period, 100) .unwrap(); assert_eq!(lc_updates.len(), 2); } -/// Tests that `store.heal_freezer_block_roots_at_split` inserts block roots between last restore point -/// slot and the split slot. -#[tokio::test] -async fn heal_freezer_block_roots_at_split() { - // chunk_size is hard-coded to 128 - let num_blocks_produced = E::slots_per_epoch() * 20; - let db_path = tempdir().unwrap(); - let store = get_store_generic( - &db_path, - StoreConfig { - slots_per_restore_point: 2 * E::slots_per_epoch(), - ..Default::default() - }, - test_spec::(), - ); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - - harness - .extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - let split_slot = store.get_split_slot(); - assert_eq!(split_slot, 18 * E::slots_per_epoch()); - - // Do a heal before deleting to make sure that it doesn't break. - let last_restore_point_slot = Slot::new(16 * E::slots_per_epoch()); - store.heal_freezer_block_roots_at_split().unwrap(); - check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); - - // Delete block roots between `last_restore_point_slot` and `split_slot`. 
- let chunk_index = >::chunk_index( - last_restore_point_slot.as_usize(), - ); - let key_chunk = get_key_for_col(DBColumn::BeaconBlockRoots.as_str(), &chunk_key(chunk_index)); - store - .cold_db - .do_atomically(vec![KeyValueStoreOp::DeleteKey(key_chunk)]) - .unwrap(); - - let block_root_err = store - .forwards_block_roots_iterator_until( - last_restore_point_slot, - last_restore_point_slot + 1, - || unreachable!(), - &harness.chain.spec, - ) - .unwrap() - .next() - .unwrap() - .unwrap_err(); - - assert!(matches!(block_root_err, store::Error::NoContinuationData)); - - // Re-insert block roots - store.heal_freezer_block_roots_at_split().unwrap(); - check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); - - // Run for another two epochs to check that the invariant is maintained. - let additional_blocks_produced = 2 * E::slots_per_epoch(); - harness - .extend_slots(additional_blocks_produced as usize) - .await; - - check_finalization(&harness, num_blocks_produced + additional_blocks_produced); - check_split_slot(&harness, store); - check_chain_dump( - &harness, - num_blocks_produced + additional_blocks_produced + 1, - ); - check_iterators(&harness); -} - -/// Tests that `store.heal_freezer_block_roots` inserts block roots between last restore point -/// slot and the split slot. 
-#[tokio::test] -async fn heal_freezer_block_roots_with_skip_slots() { - // chunk_size is hard-coded to 128 - let num_blocks_produced = E::slots_per_epoch() * 20; - let db_path = tempdir().unwrap(); - let store = get_store_generic( - &db_path, - StoreConfig { - slots_per_restore_point: 2 * E::slots_per_epoch(), - ..Default::default() - }, - test_spec::(), - ); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - - let mut current_state = harness.get_current_state(); - let state_root = current_state.canonical_root().unwrap(); - let all_validators = &harness.get_all_validators(); - harness - .add_attested_blocks_at_slots( - current_state, - state_root, - &(1..=num_blocks_produced) - .filter(|i| i % 12 != 0) - .map(Slot::new) - .collect::>(), - all_validators, - ) - .await; - - // split slot should be 18 here - let split_slot = store.get_split_slot(); - assert_eq!(split_slot, 18 * E::slots_per_epoch()); - - let last_restore_point_slot = Slot::new(16 * E::slots_per_epoch()); - let chunk_index = >::chunk_index( - last_restore_point_slot.as_usize(), - ); - let key_chunk = get_key_for_col(DBColumn::BeaconBlockRoots.as_str(), &chunk_key(chunk_index)); - store - .cold_db - .do_atomically(vec![KeyValueStoreOp::DeleteKey(key_chunk)]) - .unwrap(); - - let block_root_err = store - .forwards_block_roots_iterator_until( - last_restore_point_slot, - last_restore_point_slot + 1, - || unreachable!(), - &harness.chain.spec, - ) - .unwrap() - .next() - .unwrap() - .unwrap_err(); - - assert!(matches!(block_root_err, store::Error::NoContinuationData)); - - // heal function - store.heal_freezer_block_roots_at_split().unwrap(); - check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); - - // Run for another two epochs to check that the invariant is maintained. 
- let additional_blocks_produced = 2 * E::slots_per_epoch(); - harness - .extend_slots(additional_blocks_produced as usize) - .await; - - check_finalization(&harness, num_blocks_produced + additional_blocks_produced); - check_split_slot(&harness, store); - check_iterators(&harness); -} - -/// Tests that `store.heal_freezer_block_roots_at_genesis` replaces 0x0 block roots between slot -/// 0 and the first non-skip slot with genesis block root. -#[tokio::test] -async fn heal_freezer_block_roots_at_genesis() { - // Run for a few epochs to ensure we're past finalization. - let num_blocks_produced = E::slots_per_epoch() * 4; - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - - // Start with 2 skip slots. - harness.advance_slot(); - harness.advance_slot(); - - harness - .extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - // Do a heal before deleting to make sure that it doesn't break. - store.heal_freezer_block_roots_at_genesis().unwrap(); - check_freezer_block_roots( - &harness, - Slot::new(0), - Epoch::new(1).end_slot(E::slots_per_epoch()), - ); - - // Write 0x0 block roots at slot 1 and slot 2. 
- let chunk_index = 0; - let chunk_db_key = chunk_key(chunk_index); - let mut chunk = - Chunk::::load(&store.cold_db, DBColumn::BeaconBlockRoots, &chunk_db_key) - .unwrap() - .unwrap(); - - chunk.values[1] = Hash256::zero(); - chunk.values[2] = Hash256::zero(); - - let mut ops = vec![]; - chunk - .store(DBColumn::BeaconBlockRoots, &chunk_db_key, &mut ops) - .unwrap(); - store.cold_db.do_atomically(ops).unwrap(); - - // Ensure the DB is corrupted - let block_roots = store - .forwards_block_roots_iterator_until( - Slot::new(1), - Slot::new(2), - || unreachable!(), - &harness.chain.spec, - ) - .unwrap() - .map(Result::unwrap) - .take(2) - .collect::>(); - assert_eq!( - block_roots, - vec![ - (Hash256::zero(), Slot::new(1)), - (Hash256::zero(), Slot::new(2)) - ] - ); - - // Insert genesis block roots at skip slots before first block slot - store.heal_freezer_block_roots_at_genesis().unwrap(); - check_freezer_block_roots( - &harness, - Slot::new(0), - Epoch::new(1).end_slot(E::slots_per_epoch()), - ); -} - -fn check_freezer_block_roots(harness: &TestHarness, start_slot: Slot, end_slot: Slot) { - for slot in (start_slot.as_u64()..end_slot.as_u64()).map(Slot::new) { - let (block_root, result_slot) = harness - .chain - .store - .forwards_block_roots_iterator_until(slot, slot, || unreachable!(), &harness.chain.spec) - .unwrap() - .next() - .unwrap() - .unwrap(); - assert_eq!(slot, result_slot); - let expected_block_root = harness - .chain - .block_root_at_slot(slot, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(); - assert_eq!(expected_block_root, block_root); - } -} - #[tokio::test] async fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; @@ -741,7 +330,7 @@ async fn long_skip() { final_blocks as usize, BlockStrategy::ForkCanonicalChainAt { previous_slot: Slot::new(initial_blocks), - first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), + first_slot: Slot::new(initial_blocks + skip_slots + 1), }, 
AttestationStrategy::AllValidators, ) @@ -792,8 +381,7 @@ async fn randao_genesis_storage() { .beacon_state .randao_mixes() .iter() - .find(|x| **x == genesis_value) - .is_some()); + .any(|x| *x == genesis_value)); // Then upon adding one more block, it isn't harness.advance_slot(); @@ -804,14 +392,13 @@ async fn randao_genesis_storage() { AttestationStrategy::AllValidators, ) .await; - assert!(harness + assert!(!harness .chain .head_snapshot() .beacon_state .randao_mixes() .iter() - .find(|x| **x == genesis_value) - .is_none()); + .any(|x| *x == genesis_value)); check_finalization(&harness, num_slots); check_split_slot(&harness, store); @@ -902,11 +489,12 @@ async fn epoch_boundary_state_attestation_processing() { .load_epoch_boundary_state(&block.state_root()) .expect("no error") .expect("epoch boundary state exists"); - let ebs_state_root = epoch_boundary_state.canonical_root().unwrap(); - let ebs_of_ebs = store + let ebs_state_root = epoch_boundary_state.update_tree_hash_cache().unwrap(); + let mut ebs_of_ebs = store .load_epoch_boundary_state(&ebs_state_root) .expect("no error") .expect("ebs of ebs exists"); + ebs_of_ebs.apply_pending_mutations().unwrap(); assert_eq!(epoch_boundary_state, ebs_of_ebs); // If the attestation is pre-finalization it should be rejected. @@ -968,10 +556,19 @@ async fn forwards_iter_block_and_state_roots_until() { check_finalization(&harness, num_blocks_produced); check_split_slot(&harness, store.clone()); - // The last restore point slot is the point at which the hybrid forwards iterator behaviour + // The freezer upper bound slot is the point at which the hybrid forwards iterator behaviour // changes. 
- let last_restore_point_slot = store.get_latest_restore_point_slot().unwrap(); - assert!(last_restore_point_slot > 0); + let block_upper_bound = store + .freezer_upper_bound_for_column(DBColumn::BeaconBlockRoots, Slot::new(0)) + .unwrap() + .unwrap(); + assert!(block_upper_bound > 0); + let state_upper_bound = store + .freezer_upper_bound_for_column(DBColumn::BeaconStateRoots, Slot::new(0)) + .unwrap() + .unwrap(); + assert!(state_upper_bound > 0); + assert_eq!(state_upper_bound, block_upper_bound); let chain = &harness.chain; let head_state = harness.get_current_state(); @@ -996,14 +593,12 @@ async fn forwards_iter_block_and_state_roots_until() { }; let split_slot = store.get_split_slot(); - assert!(split_slot > last_restore_point_slot); + assert_eq!(split_slot, block_upper_bound); - test_range(Slot::new(0), last_restore_point_slot); - test_range(last_restore_point_slot, last_restore_point_slot); - test_range(last_restore_point_slot - 1, last_restore_point_slot); - test_range(Slot::new(0), last_restore_point_slot - 1); test_range(Slot::new(0), split_slot); - test_range(last_restore_point_slot - 1, split_slot); + test_range(split_slot, split_slot); + test_range(split_slot - 1, split_slot); + test_range(Slot::new(0), split_slot - 1); test_range(Slot::new(0), head_state.slot()); } @@ -1465,7 +1060,7 @@ fn check_shuffling_compatible( let current_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( &block_root, head_state.current_epoch(), - &head_state, + head_state, ); // Check for consistency with the more expensive shuffling lookup. 
@@ -1505,7 +1100,7 @@ fn check_shuffling_compatible( let previous_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( &block_root, head_state.previous_epoch(), - &head_state, + head_state, ); harness .chain @@ -1533,14 +1128,11 @@ fn check_shuffling_compatible( // Targeting two epochs before the current epoch should always return false if head_state.current_epoch() >= 2 { - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, - head_state.current_epoch() - 2, - &head_state - ), - false - ); + assert!(!harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch() - 2, + head_state + )); } } } @@ -1962,14 +1554,13 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks, _, _, _) = rig - .add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ) - .await; + let (canonical_blocks, _, _, _) = Box::pin(rig.add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + )) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -2342,7 +1933,7 @@ async fn prune_single_block_long_skip() { 2 * slots_per_epoch, 1, 2 * slots_per_epoch, - 2 * slots_per_epoch as u64, + 2 * slots_per_epoch, 1, ) .await; @@ -2364,31 +1955,45 @@ async fn prune_shared_skip_states_mid_epoch() { #[tokio::test] async fn prune_shared_skip_states_epoch_boundaries() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch).await; - pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch).await; - pruning_test( - 2 * slots_per_epoch + slots_per_epoch / 2, - slots_per_epoch as u64 / 2, + Box::pin(pruning_test( + slots_per_epoch - 1, + 1, slots_per_epoch, - slots_per_epoch as u64 / 
2 + 1, + 2, slots_per_epoch, - ) + )) .await; - pruning_test( - 2 * slots_per_epoch + slots_per_epoch / 2, - slots_per_epoch as u64 / 2, + Box::pin(pruning_test( + slots_per_epoch - 1, + 2, slots_per_epoch, - slots_per_epoch as u64 / 2 + 1, + 1, slots_per_epoch, - ) + )) .await; - pruning_test( + Box::pin(pruning_test( + 2 * slots_per_epoch + slots_per_epoch / 2, + slots_per_epoch / 2, + slots_per_epoch, + slots_per_epoch / 2 + 1, + slots_per_epoch, + )) + .await; + Box::pin(pruning_test( + 2 * slots_per_epoch + slots_per_epoch / 2, + slots_per_epoch / 2, + slots_per_epoch, + slots_per_epoch / 2 + 1, + slots_per_epoch, + )) + .await; + Box::pin(pruning_test( 2 * slots_per_epoch - 1, - slots_per_epoch as u64, + slots_per_epoch, 1, 0, 2 * slots_per_epoch, - ) + )) .await; } @@ -2497,7 +2102,7 @@ async fn pruning_test( ); check_chain_dump( &harness, - (num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1) as u64, + num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1, ); let all_canonical_states = harness @@ -2728,7 +2333,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await; let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - let log = test_logger(); + let log = harness.chain.logger().clone(); let temp2 = tempdir().unwrap(); let store = get_store(&temp2); let spec = test_spec::(); @@ -2830,9 +2435,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Forwards iterator from 0 should fail as we lack blocks. assert!(matches!( beacon_chain.forwards_iter_block_roots(Slot::new(0)), - Err(BeaconChainError::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { .. } - )) + Err(BeaconChainError::HistoricalBlockOutOfRange { .. 
}) )); // Simulate processing of a `StatusMessage` with an older finalized epoch by calling @@ -2900,7 +2503,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { beacon_chain .import_historical_block_batch(batch_with_invalid_first_block) .unwrap_err(), - BeaconChainError::HistoricalBlockError(HistoricalBlockError::InvalidSignature) + HistoricalBlockError::InvalidSignature )); // Importing the batch with valid signatures should succeed. @@ -2955,11 +2558,11 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { } // Anchor slot is still set to the slot of the checkpoint block. - assert_eq!(store.get_anchor_slot(), Some(wss_block.slot())); + assert_eq!(store.get_anchor_info().anchor_slot, wss_block.slot()); // Reconstruct states. - store.clone().reconstruct_historic_states().unwrap(); - assert_eq!(store.get_anchor_slot(), None); + store.clone().reconstruct_historic_states(None).unwrap(); + assert_eq!(store.get_anchor_info().anchor_slot, 0); } /// Test that blocks and attestations that refer to states around an unaligned split state are @@ -3018,8 +2621,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { harness.advance_slot(); } harness.extend_to_slot(finalizing_slot - 1).await; - harness - .add_block_at_slot(finalizing_slot, harness.get_current_state()) + Box::pin(harness.add_block_at_slot(finalizing_slot, harness.get_current_state())) .await .unwrap(); @@ -3194,6 +2796,7 @@ async fn finalizes_after_resuming_from_db() { ); } +#[allow(clippy::large_stack_frames)] #[tokio::test] async fn revert_minority_fork_on_resume() { let validator_count = 16; @@ -3385,7 +2988,6 @@ async fn schema_downgrade_to_min_version() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let spec = &harness.chain.spec.clone(); harness .extend_chain( @@ -3395,7 +2997,8 @@ async fn schema_downgrade_to_min_version() { ) .await; - let min_version 
= SchemaVersion(19); + let min_version = SchemaVersion(22); + let genesis_state_root = Some(harness.chain.genesis_state_root); // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); @@ -3408,25 +3011,22 @@ async fn schema_downgrade_to_min_version() { let store = get_store(&db_path); // Downgrade. - let deposit_contract_deploy_block = 0; migrate_schema::>( store.clone(), - deposit_contract_deploy_block, + genesis_state_root, CURRENT_SCHEMA_VERSION, min_version, store.logger().clone(), - spec, ) .expect("schema downgrade to minimum version should work"); // Upgrade back. migrate_schema::>( store.clone(), - deposit_contract_deploy_block, + genesis_state_root, min_version, CURRENT_SCHEMA_VERSION, store.logger().clone(), - spec, ) .expect("schema upgrade from minimum version should work"); @@ -3449,11 +3049,10 @@ async fn schema_downgrade_to_min_version() { let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap()); migrate_schema::>( store.clone(), - deposit_contract_deploy_block, + genesis_state_root, CURRENT_SCHEMA_VERSION, min_version_sub_1, harness.logger().clone(), - spec, ) .expect_err("should not downgrade below minimum version"); } @@ -3785,15 +3384,15 @@ async fn prune_historic_states() { ) .await; - // Check historical state is present. - let state_roots_iter = harness + // Check historical states are present. + let first_epoch_state_roots = harness .chain .forwards_iter_state_roots(Slot::new(0)) - .unwrap(); - for (state_root, slot) in state_roots_iter + .unwrap() .take(E::slots_per_epoch() as usize) .map(Result::unwrap) - { + .collect::>(); + for &(state_root, slot) in &first_epoch_state_roots { assert!(store.get_state(&state_root, Some(slot)).unwrap().is_some()); } @@ -3802,29 +3401,18 @@ async fn prune_historic_states() { .unwrap(); // Check that anchor info is updated. 
- let anchor_info = store.get_anchor_info().unwrap(); + let anchor_info = store.get_anchor_info(); assert_eq!(anchor_info.state_lower_limit, 0); assert_eq!(anchor_info.state_upper_limit, STATE_UPPER_LIMIT_NO_RETAIN); - // Historical states should be pruned. - let state_roots_iter = harness - .chain - .forwards_iter_state_roots(Slot::new(1)) - .unwrap(); - for (state_root, slot) in state_roots_iter - .take(E::slots_per_epoch() as usize) - .map(Result::unwrap) - { - assert!(store.get_state(&state_root, Some(slot)).unwrap().is_none()); + // Ensure all epoch 0 states other than the genesis have been pruned. + for &(state_root, slot) in &first_epoch_state_roots { + assert_eq!( + store.get_state(&state_root, Some(slot)).unwrap().is_some(), + slot == 0 + ); } - // Ensure that genesis state is still accessible - let genesis_state_root = harness.chain.genesis_state_root; - assert!(store - .get_state(&genesis_state_root, Some(Slot::new(0))) - .unwrap() - .is_some()); - // Run for another two epochs. 
let additional_blocks_produced = 2 * E::slots_per_epoch(); harness diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index d1b3139d42..6d30b8a4e3 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -73,7 +73,7 @@ fn get_valid_sync_committee_message_for_block( let head_state = harness.chain.head_beacon_state_cloned(); let (signature, _) = harness .make_sync_committee_messages(&head_state, block_root, slot, relative_sync_committee) - .get(0) + .first() .expect("sync messages should exist") .get(message_index) .expect("first sync message should exist") @@ -104,7 +104,7 @@ fn get_valid_sync_contribution( ); let (_, contribution_opt) = sync_contributions - .get(0) + .first() .expect("sync contributions should exist"); let contribution = contribution_opt .as_ref() diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 7ae34ccf38..c641f32b82 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -170,7 +170,7 @@ async fn find_reorgs() { harness .extend_chain( - num_blocks_produced as usize, + num_blocks_produced, BlockStrategy::OnCanonicalHead, // No need to produce attestations for this test. 
AttestationStrategy::SomeValidators(vec![]), @@ -203,7 +203,7 @@ async fn find_reorgs() { assert_eq!( find_reorg_slot( &harness.chain, - &head_state, + head_state, harness.chain.head_beacon_block().canonical_root() ), head_slot @@ -503,7 +503,6 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() { .unwrap(); let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) - .into_iter() .map(|validator_index| { let slot = state .get_attestation_duties(validator_index, RelativeEpoch::Current) diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 554010be07..c96e0868d7 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -4,22 +4,22 @@ version = "0.1.0" edition = { workspace = true } [dependencies] -slog = { workspace = true } -itertools = { workspace = true } -logging = { workspace = true } -tokio = { workspace = true } -tokio-util = { workspace = true } -futures = { workspace = true } fnv = { workspace = true } +futures = { workspace = true } +itertools = { workspace = true } +lighthouse_network = { workspace = true } +logging = { workspace = true } +metrics = { workspace = true } +num_cpus = { workspace = true } +parking_lot = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } strum = { workspace = true } task_executor = { workspace = true } -slot_clock = { workspace = true } -lighthouse_network = { workspace = true } +tokio = { workspace = true } +tokio-util = { workspace = true } types = { workspace = true } -lighthouse_metrics = { workspace = true } -parking_lot = { workspace = true } -num_cpus = { workspace = true } -serde = { workspace = true } [dev-dependencies] tokio = { workspace = true, features = ["test-util"] } diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 0a7bdba18d..fc8c712f4e 100644 --- 
a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index c3658f45c7..3531e81c84 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -5,8 +5,8 @@ edition = { workspace = true } authors = ["Sean Anderson "] [dependencies] +eth2 = { workspace = true } +lighthouse_version = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } -eth2 = { workspace = true } serde = { workspace = true } -lighthouse_version = { workspace = true } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 06f7763c8a..614115eb58 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -5,42 +5,41 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dev-dependencies] +operation_pool = { workspace = true } serde_yaml = { workspace = true } state_processing = { workspace = true } -operation_pool = { workspace = true } tokio = { workspace = true } [dependencies] beacon_chain = { workspace = true } -store = { workspace = true } -network = { workspace = true } -timer = { path = "../timer" } -lighthouse_network = { workspace = true } -types = { workspace = true } -eth2_config = { workspace = true } -slot_clock = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -error-chain = { workspace = true } -slog = { workspace = true } -tokio = { workspace = true } -futures = { workspace = true } +beacon_processor = { workspace = true } +directory = { workspace = true } dirs = { workspace = true } +environment = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } -kzg = { workspace = true } -sensitive_url = { workspace = true } +eth2_config = { workspace = true } +ethereum_ssz = { 
workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } genesis = { workspace = true } -task_executor = { workspace = true } -environment = { workspace = true } -lighthouse_metrics = { workspace = true } -time = "0.3.5" -directory = { workspace = true } http_api = { workspace = true } http_metrics = { path = "../http_metrics" } +kzg = { workspace = true } +lighthouse_network = { workspace = true } +metrics = { workspace = true } +monitoring_api = { workspace = true } +network = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } slasher = { workspace = true } slasher_service = { path = "../../slasher/service" } -monitoring_api = { workspace = true } -execution_layer = { workspace = true } -beacon_processor = { workspace = true } -ethereum_ssz = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +store = { workspace = true } +task_executor = { workspace = true } +time = "0.3.5" +timer = { path = "../timer" } +tokio = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index d55fa668f0..f47da54288 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -7,7 +7,6 @@ use crate::Client; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; -use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ @@ -970,7 +969,6 @@ where } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); - 
start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone()); start_availability_cache_maintenance_service( runtime_context.executor.clone(), beacon_chain.clone(), @@ -1060,21 +1058,21 @@ where self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); - let inner_spec = spec.clone(); - let deposit_contract_deploy_block = context + // Optionally grab the genesis state root. + // This will only be required if a DB upgrade to V22 is needed. + let genesis_state_root = context .eth2_network_config .as_ref() - .map(|config| config.deposit_contract_deploy_block) - .unwrap_or(0); + .and_then(|config| config.genesis_state_root::().transpose()) + .transpose()?; let schema_upgrade = |db, from, to| { migrate_schema::>( db, - deposit_contract_deploy_block, + genesis_state_root, from, to, log, - &inner_spec, ) }; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index a25216ff3e..becc781ed3 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -59,10 +59,6 @@ pub struct Config { /// Path where the blobs database will be located if blobs should be in a separate database. pub blobs_db_path: Option, pub log_file: PathBuf, - /// If true, the node will use co-ordinated junk for eth1 values. - /// - /// This is the method used for the 2019 client interop in Canada. - pub dummy_eth1_backend: bool, pub sync_eth1_chain: bool, /// Graffiti to be inserted everytime we create a block if the validator doesn't specify. 
pub beacon_graffiti: GraffitiOrigin, @@ -103,8 +99,7 @@ impl Default for Config { store: <_>::default(), network: NetworkConfig::default(), chain: <_>::default(), - dummy_eth1_backend: false, - sync_eth1_chain: false, + sync_eth1_chain: true, eth1: <_>::default(), execution_layer: None, trusted_setup, diff --git a/beacon_node/client/src/error.rs b/beacon_node/client/src/error.rs deleted file mode 100644 index 20cf6f9877..0000000000 --- a/beacon_node/client/src/error.rs +++ /dev/null @@ -1,7 +0,0 @@ -use error_chain::error_chain; - -error_chain! { - links { - Network(network::error::Error, network::error::ErrorKind); - } -} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e6042103e1..0b6550c208 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -4,7 +4,6 @@ mod metrics; mod notifier; pub mod builder; -pub mod error; use beacon_chain::BeaconChain; use lighthouse_network::{Enr, Multiaddr, NetworkGlobals}; diff --git a/beacon_node/client/src/metrics.rs b/beacon_node/client/src/metrics.rs index ebc4fe70a7..e5c07baddc 100644 --- a/beacon_node/client/src/metrics.rs +++ b/beacon_node/client/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static SYNC_SLOTS_PER_SECOND: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 839d296c76..f686c2c650 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -45,10 +45,7 @@ pub fn spawn_notifier( let mut current_sync_state = network.sync_state(); // Store info if we are required to do a backfill sync. - let original_anchor_slot = beacon_chain - .store - .get_anchor_info() - .map(|ai| ai.oldest_block_slot); + let original_oldest_block_slot = beacon_chain.store.get_anchor_info().oldest_block_slot; let interval_future = async move { // Perform pre-genesis logging. 
@@ -141,22 +138,17 @@ pub fn spawn_notifier( match current_sync_state { SyncState::BackFillSyncing { .. } => { // Observe backfilling sync info. - if let Some(oldest_slot) = original_anchor_slot { - if let Some(current_anchor_slot) = beacon_chain - .store - .get_anchor_info() - .map(|ai| ai.oldest_block_slot) - { - sync_distance = current_anchor_slot - .saturating_sub(beacon_chain.genesis_backfill_slot); - speedo - // For backfill sync use a fake slot which is the distance we've progressed from the starting `oldest_block_slot`. - .observe( - oldest_slot.saturating_sub(current_anchor_slot), - Instant::now(), - ); - } - } + let current_oldest_block_slot = + beacon_chain.store.get_anchor_info().oldest_block_slot; + sync_distance = current_oldest_block_slot + .saturating_sub(beacon_chain.genesis_backfill_slot); + speedo + // For backfill sync use a fake slot which is the distance we've progressed + // from the starting `original_oldest_block_slot`. + .observe( + original_oldest_block_slot.saturating_sub(current_oldest_block_slot), + Instant::now(), + ); } SyncState::SyncingFinalized { .. } | SyncState::SyncingHead { .. 
} @@ -213,14 +205,14 @@ pub fn spawn_notifier( "Downloading historical blocks"; "distance" => distance, "speed" => sync_speed_pretty(speed), - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_oldest_block_slot.saturating_sub(beacon_chain.genesis_backfill_slot))), ); } else { info!( log, "Downloading historical blocks"; "distance" => distance, - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_oldest_block_slot.saturating_sub(beacon_chain.genesis_backfill_slot))), ); } } else if !is_backfilling && last_backfill_log_slot.is_some() { diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 4910cfd2e1..8ccd50aad8 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -5,27 +5,27 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dev-dependencies] +environment = { workspace = true } eth1_test_rig = { workspace = true } serde_yaml = { workspace = true } sloggers = { workspace = true } -environment = { workspace = true } [dependencies] -execution_layer = { workspace = true } -futures = { workspace = true } -serde = { workspace = true } -types = { workspace = true } -merkle_proof = { workspace = true } +eth2 = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -parking_lot = { workspace = true } -slog = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } logging = { workspace = true } -superstruct = { workspace = true } -tokio = { workspace = true } -state_processing = { workspace = true } -lighthouse_metrics = 
{ workspace = true } -task_executor = { workspace = true } -eth2 = { workspace = true } +merkle_proof = { workspace = true } +metrics = { workspace = true } +parking_lot = { workspace = true } sensitive_url = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +state_processing = { workspace = true } +superstruct = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs index 9a11e7a692..1df4ba0df9 100644 --- a/beacon_node/eth1/src/metrics.rs +++ b/beacon_node/eth1/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 843a7b83cb..7eb7b4a15e 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -2,54 +2,53 @@ name = "execution_layer" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +alloy-consensus = { workspace = true } alloy-primitives = { workspace = true } -types = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } -logging = { workspace = true } -sensitive_url = { workspace = true } -reqwest = { workspace = true } -ethereum_serde_utils = { workspace = true } -serde_json = { workspace = true } -serde = { workspace = true } -warp = { workspace = true } -jsonwebtoken = "9" +alloy-rlp = { workspace = true } +arc-swap = "1.6.0" +builder_client = { path = "../builder_client" } bytes = { workspace = true } -task_executor = { workspace = true } -hex = { workspace = true } -ethereum_ssz = { workspace = true } -ssz_types = { workspace = true } eth2 = { workspace = true } +eth2_network_config = { workspace = true } 
+ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +ethers-core = { workspace = true } +fixed_bytes = { workspace = true } +fork_choice = { workspace = true } +hash-db = "0.15.2" +hash256-std-hasher = "0.15.2" +hex = { workspace = true } +jsonwebtoken = "9" +keccak-hash = "0.10.0" kzg = { workspace = true } -state_processing = { workspace = true } -superstruct = { workspace = true } +lighthouse_version = { workspace = true } +logging = { workspace = true } lru = { workspace = true } +metrics = { workspace = true } +parking_lot = { workspace = true } +pretty_reqwest_error = { workspace = true } +rand = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +sha2 = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +ssz_types = { workspace = true } +state_processing = { workspace = true } +strum = { workspace = true } +superstruct = { workspace = true } +task_executor = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } -parking_lot = { workspace = true } -slot_clock = { workspace = true } -tempfile = { workspace = true } -rand = { workspace = true } -zeroize = { workspace = true } -lighthouse_metrics = { workspace = true } -ethers-core = { workspace = true } -builder_client = { path = "../builder_client" } -fork_choice = { workspace = true } -tokio-stream = { workspace = true } -strum = { workspace = true } -keccak-hash = "0.10.0" -hash256-std-hasher = "0.15.2" triehash = "0.8.4" -hash-db = "0.15.2" -pretty_reqwest_error = { workspace = true } -arc-swap = "1.6.0" -eth2_network_config = { workspace = true } -alloy-rlp = { workspace = true } -alloy-consensus = { workspace = true } -lighthouse_version = { workspace = true } -fixed_bytes = { workspace = true } -sha2 
= { workspace = true } +types = { workspace = true } +warp = { workspace = true } +zeroize = { workspace = true } diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index cdc172cff4..d3a32c7929 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -7,7 +7,7 @@ use keccak_hash::KECCAK_EMPTY_LIST_RLP; use triehash::ordered_trie_root; use types::{ EncodableExecutionBlockHeader, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, - ExecutionPayloadRef, Hash256, + ExecutionPayloadRef, ExecutionRequests, Hash256, }; /// Calculate the block hash of an execution block. @@ -17,6 +17,7 @@ use types::{ pub fn calculate_execution_block_hash( payload: ExecutionPayloadRef, parent_beacon_block_root: Option, + execution_requests: Option<&ExecutionRequests>, ) -> (ExecutionBlockHash, Hash256) { // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a @@ -38,6 +39,7 @@ pub fn calculate_execution_block_hash( let rlp_blob_gas_used = payload.blob_gas_used().ok(); let rlp_excess_blob_gas = payload.excess_blob_gas().ok(); + let requests_root = execution_requests.map(|requests| requests.requests_hash()); // Construct the block header. let exec_block_header = ExecutionBlockHeader::from_payload( @@ -48,6 +50,7 @@ pub fn calculate_execution_block_hash( rlp_blob_gas_used, rlp_excess_blob_gas, parent_beacon_block_root, + requests_root, ); // Hash the RLP encoding of the block header. 
@@ -118,6 +121,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_hash = @@ -149,6 +153,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_rlp = 
"f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_hash = @@ -181,6 +186,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_hash = Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") @@ -211,6 +217,7 @@ mod test { blob_gas_used: Some(0x0u64), excess_blob_gas: Some(0x0u64), parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + requests_root: None, }; let expected_hash = Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") @@ -221,29 +228,30 @@ mod test { #[test] fn test_rlp_encode_block_electra() { let header = ExecutionBlockHeader { - parent_hash: Hash256::from_str("172864416698b842f4c92f7b476be294b4ef720202779df194cd225f531053ab").unwrap(), + parent_hash: Hash256::from_str("a628f146df398a339768bd101f7dc41d828be79aca5dd02cc878a51bdbadd761").unwrap(), ommers_hash: 
Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(), - beneficiary: Address::from_str("878705ba3f8bc32fcf7f4caa1a35e72af65cf766").unwrap(), - state_root: Hash256::from_str("c6457d0df85c84c62d1c68f68138b6e796e8a44fb44de221386fb2d5611c41e0").unwrap(), - transactions_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - logs_bloom:<[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), + beneficiary: Address::from_str("f97e180c050e5ab072211ad2c213eb5aee4df134").unwrap(), + state_root: Hash256::from_str("fdff009f8280bd113ebb4df8ce4e2dcc9322d43184a0b506e70b7f4823ca1253").unwrap(), + transactions_root: Hash256::from_str("452806578b4fa881cafb019c47e767e37e2249accf859159f00cddefb2579bb5").unwrap(), + receipts_root: Hash256::from_str("72ceac0f16a32041c881b3220d39ca506a286bef163c01a4d0821cd4027d31c7").unwrap(), + logs_bloom:<[u8; 
256]>::from_hex("10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000").unwrap().into(), difficulty: Uint256::ZERO, - number: Uint256::from(97), - gas_limit: Uint256::from(27482534), - gas_used: Uint256::ZERO, - timestamp: 1692132829u64, - extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(), - mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(), + number: Uint256::from(8230), + gas_limit: Uint256::from(30000000), + gas_used: Uint256::from(3716848), + timestamp: 1730921268, + extra_data: hex::decode("d883010e0c846765746888676f312e32332e32856c696e7578").unwrap(), + mix_hash: Hash256::from_str("e87ca9a45b2e61bbe9080d897db1d584b5d2367d22e898af901091883b7b96ec").unwrap(), nonce: Hash64::ZERO, - base_fee_per_gas: Uint256::from(2374u64), + base_fee_per_gas: Uint256::from(7u64), withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()), - blob_gas_used: Some(0x0u64), - excess_blob_gas: Some(0x0u64), - parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + blob_gas_used: Some(786432), + excess_blob_gas: Some(44695552), + parent_beacon_block_root: Some(Hash256::from_str("f3a888fee010ebb1ae083547004e96c254b240437823326fdff8354b1fc25629").unwrap()), + requests_root: Some(Hash256::from_str("9440d3365f07573919e1e9ac5178c20ec6fe267357ee4baf8b6409901f331b62").unwrap()), }; let expected_hash = - 
Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") + Hash256::from_str("61e67afc96bf21be6aab52c1ace1db48de7b83f03119b0644deb4b69e87e09e1") .unwrap(); test_rlp_encoding(&header, None, expected_hash); } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 1c23c8ba66..083aaf2e25 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,7 +1,7 @@ use crate::engines::ForkchoiceState; use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, - ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_GET_BLOBS_V1, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, @@ -507,6 +507,7 @@ pub struct EngineCapabilities { pub get_payload_v3: bool, pub get_payload_v4: bool, pub get_client_version_v1: bool, + pub get_blobs_v1: bool, } impl EngineCapabilities { @@ -554,6 +555,9 @@ impl EngineCapabilities { if self.get_client_version_v1 { response.push(ENGINE_GET_CLIENT_VERSION_V1); } + if self.get_blobs_v1 { + response.push(ENGINE_GET_BLOBS_V1); + } response } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 9c2c43bcf7..33dc60d037 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -58,6 +58,9 @@ pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1 pub const ENGINE_GET_CLIENT_VERSION_V1: &str = "engine_getClientVersionV1"; pub const ENGINE_GET_CLIENT_VERSION_TIMEOUT: Duration = Duration::from_secs(1); +pub const ENGINE_GET_BLOBS_V1: &str = 
"engine_getBlobsV1"; +pub const ENGINE_GET_BLOBS_TIMEOUT: Duration = Duration::from_secs(1); + /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; /// This code is returned by all clients when a method is not supported @@ -79,6 +82,7 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_CLIENT_VERSION_V1, + ENGINE_GET_BLOBS_V1, ]; /// We opt to initialize the JsonClientVersionV1 rather than the ClientVersionV1 @@ -702,6 +706,20 @@ impl HttpJsonRpc { } } + pub async fn get_blobs( + &self, + versioned_hashes: Vec, + ) -> Result>>, Error> { + let params = json!([versioned_hashes]); + + self.rpc_request( + ENGINE_GET_BLOBS_V1, + params, + ENGINE_GET_BLOBS_TIMEOUT * self.execution_timeout_multiplier, + ) + .await + } + pub async fn get_block_by_number<'a>( &self, query: BlockByNumberQuery<'a>, @@ -794,7 +812,7 @@ impl HttpJsonRpc { new_payload_request_electra.versioned_hashes, new_payload_request_electra.parent_beacon_block_root, new_payload_request_electra - .execution_requests_list + .execution_requests .get_execution_requests_list(), ]); @@ -1067,6 +1085,7 @@ impl HttpJsonRpc { get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), get_payload_v4: capabilities.contains(ENGINE_GET_PAYLOAD_V4), get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), + get_blobs_v1: capabilities.contains(ENGINE_GET_BLOBS_V1), }) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 753554c149..1c6639804e 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -6,8 +6,10 @@ use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use 
types::blob_sidecar::BlobsList; -use types::execution_requests::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; -use types::{FixedVector, Unsigned}; +use types::execution_requests::{ + ConsolidationRequests, DepositRequests, RequestPrefix, WithdrawalRequests, +}; +use types::{Blob, FixedVector, KzgProof, Unsigned}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -339,25 +341,6 @@ impl From> for ExecutionPayload { } } -/// This is used to index into the `execution_requests` array. -#[derive(Debug, Copy, Clone)] -enum RequestPrefix { - Deposit, - Withdrawal, - Consolidation, -} - -impl RequestPrefix { - pub fn from_prefix(prefix: u8) -> Option { - match prefix { - 0 => Some(Self::Deposit), - 1 => Some(Self::Withdrawal), - 2 => Some(Self::Consolidation), - _ => None, - } - } -} - /// Format of `ExecutionRequests` received over the engine api. /// /// Array of ssz-encoded requests list encoded as hex bytes. @@ -379,7 +362,8 @@ impl TryFrom for ExecutionRequests { for (i, request) in value.0.into_iter().enumerate() { // hex string - let decoded_bytes = hex::decode(request).map_err(|e| format!("Invalid hex {:?}", e))?; + let decoded_bytes = hex::decode(request.strip_prefix("0x").unwrap_or(&request)) + .map_err(|e| format!("Invalid hex {:?}", e))?; match RequestPrefix::from_prefix(i as u8) { Some(RequestPrefix::Deposit) => { requests.deposits = DepositRequests::::from_ssz_bytes(&decoded_bytes) @@ -431,7 +415,7 @@ pub struct JsonGetPayloadResponse { #[superstruct(only(V3, V4))] pub should_override_builder: bool, #[superstruct(only(V4))] - pub requests: JsonExecutionRequests, + pub execution_requests: JsonExecutionRequests, } impl TryFrom> for GetPayloadResponse { @@ -464,7 +448,7 @@ impl TryFrom> for GetPayloadResponse { block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - requests: response.requests.try_into()?, + requests: 
response.execution_requests.try_into()?, })) } } @@ -625,6 +609,14 @@ impl From> for BlobsBundle { } } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(bound = "E: EthSpec", rename_all = "camelCase")] +pub struct BlobAndProofV1 { + #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] + pub blob: Blob, + pub proof: KzgProof, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkchoiceStateV1 { diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 318779b7f3..60bc848974 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -44,7 +44,7 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { #[superstruct(only(Deneb, Electra))] pub parent_beacon_block_root: Hash256, #[superstruct(only(Electra))] - pub execution_requests_list: &'block ExecutionRequests, + pub execution_requests: &'block ExecutionRequests, } impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { @@ -121,8 +121,11 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); - let (header_hash, rlp_transactions_root) = - calculate_execution_block_hash(payload, parent_beacon_block_root); + let (header_hash, rlp_transactions_root) = calculate_execution_block_hash( + payload, + parent_beacon_block_root, + self.execution_requests().ok().copied(), + ); if header_hash != self.block_hash() { return Err(Error::BlockHashMismatch { @@ -185,7 +188,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .map(kzg_commitment_to_versioned_hash) .collect(), parent_beacon_block_root: block_ref.parent_root, - execution_requests_list: &block_ref.body.execution_requests, + execution_requests: &block_ref.body.execution_requests, })), } } diff --git 
a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f7e490233f..ae0dca9833 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,6 +4,7 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. +use crate::json_structures::BlobAndProofV1; use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; use auth::{strip_prefix, Auth, JwtKey}; @@ -27,7 +28,7 @@ use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; -use std::collections::HashMap; +use std::collections::{hash_map::Entry, HashMap}; use std::fmt; use std::future::Future; use std::io::Write; @@ -65,7 +66,7 @@ mod metrics; pub mod payload_cache; mod payload_status; pub mod test_utils; -mod versioned_hashes; +pub mod versioned_hashes; /// Indicates the default jwt authenticated execution endpoint. pub const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/"; @@ -318,10 +319,52 @@ impl> BlockProposalContents { + pub parent_hash: ExecutionBlockHash, + pub parent_gas_limit: u64, + pub proposer_gas_limit: Option, + pub payload_attributes: &'a PayloadAttributes, + pub forkchoice_update_params: &'a ForkchoiceUpdateParameters, + pub current_fork: ForkName, +} + #[derive(Clone, PartialEq)] pub struct ProposerPreparationDataEntry { update_epoch: Epoch, preparation_data: ProposerPreparationData, + gas_limit: Option, +} + +impl ProposerPreparationDataEntry { + pub fn update(&mut self, updated: Self) -> bool { + let mut changed = false; + // Update `gas_limit` if `updated.gas_limit` is `Some` and: + // - `self.gas_limit` is `None`, or + // - both are `Some` but the values differ. 
+ if let Some(updated_gas_limit) = updated.gas_limit { + if self.gas_limit != Some(updated_gas_limit) { + self.gas_limit = Some(updated_gas_limit); + changed = true; + } + } + + // Update `update_epoch` if it differs + if self.update_epoch != updated.update_epoch { + self.update_epoch = updated.update_epoch; + changed = true; + } + + // Update `preparation_data` if it differs + if self.preparation_data != updated.preparation_data { + self.preparation_data = updated.preparation_data; + changed = true; + } + + changed + } } #[derive(Hash, PartialEq, Eq)] @@ -710,23 +753,29 @@ impl ExecutionLayer { } /// Updates the proposer preparation data provided by validators - pub async fn update_proposer_preparation( - &self, - update_epoch: Epoch, - preparation_data: &[ProposerPreparationData], - ) { + pub async fn update_proposer_preparation<'a, I>(&self, update_epoch: Epoch, proposer_data: I) + where + I: IntoIterator)>, + { let mut proposer_preparation_data = self.proposer_preparation_data().await; - for preparation_entry in preparation_data { + + for (preparation_entry, gas_limit) in proposer_data { let new = ProposerPreparationDataEntry { update_epoch, preparation_data: preparation_entry.clone(), + gas_limit: *gas_limit, }; - let existing = - proposer_preparation_data.insert(preparation_entry.validator_index, new.clone()); - - if existing != Some(new) { - metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + match proposer_preparation_data.entry(preparation_entry.validator_index) { + Entry::Occupied(mut entry) => { + if entry.get_mut().update(new) { + metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + } + } + Entry::Vacant(entry) => { + entry.insert(new); + metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + } } } } @@ -808,6 +857,13 @@ impl ExecutionLayer { } } + pub async fn get_proposer_gas_limit(&self, proposer_index: u64) -> Option { + self.proposer_preparation_data() + .await + .get(&proposer_index) + 
.and_then(|entry| entry.gas_limit) + } + /// Maps to the `engine_getPayload` JSON-RPC call. /// /// However, it will attempt to call `self.prepare_payload` if it cannot find an existing @@ -817,14 +873,10 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. - #[allow(clippy::too_many_arguments)] pub async fn get_payload( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, + payload_parameters: PayloadParameters<'_>, builder_params: BuilderParams, - current_fork: ForkName, spec: &ChainSpec, builder_boost_factor: Option, block_production_version: BlockProductionVersion, @@ -832,11 +884,8 @@ impl ExecutionLayer { let payload_result_type = match block_production_version { BlockProductionVersion::V3 => match self .determine_and_fetch_payload( - parent_hash, - payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - current_fork, builder_boost_factor, spec, ) @@ -856,25 +905,11 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metrics::GET_BLINDED_PAYLOAD], ); - self.determine_and_fetch_payload( - parent_hash, - payload_attributes, - forkchoice_update_params, - builder_params, - current_fork, - None, - spec, - ) - .await? + self.determine_and_fetch_payload(payload_parameters, builder_params, None, spec) + .await? 
} BlockProductionVersion::FullV2 => self - .get_full_payload_with( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - noop, - ) + .get_full_payload_with(payload_parameters, noop) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local)?, @@ -921,17 +956,15 @@ impl ExecutionLayer { async fn fetch_builder_and_local_payloads( &self, builder: &BuilderHttpClient, - parent_hash: ExecutionBlockHash, builder_params: &BuilderParams, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, ) -> ( Result>>, builder_client::Error>, Result, Error>, ) { let slot = builder_params.slot; let pubkey = &builder_params.pubkey; + let parent_hash = payload_parameters.parent_hash; info!( self.log(), @@ -949,17 +982,12 @@ impl ExecutionLayer { .await }), timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { - self.get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) - .await - .and_then(|local_result_type| match local_result_type { - GetPayloadResponseType::Full(payload) => Ok(payload), - GetPayloadResponseType::Blinded(_) => Err(Error::PayloadTypeMismatch), - }) + self.get_full_payload_caching(payload_parameters) + .await + .and_then(|local_result_type| match local_result_type { + GetPayloadResponseType::Full(payload) => Ok(payload), + GetPayloadResponseType::Blinded(_) => Err(Error::PayloadTypeMismatch), + }) }) ); @@ -983,26 +1011,17 @@ impl ExecutionLayer { (relay_result, local_result) } - #[allow(clippy::too_many_arguments)] async fn determine_and_fetch_payload( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, + payload_parameters: PayloadParameters<'_>, builder_params: BuilderParams, - current_fork: ForkName, builder_boost_factor: Option, spec: &ChainSpec, ) -> 
Result>, Error> { let Some(builder) = self.builder() else { // no builder.. return local payload return self - .get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + .get_full_payload_caching(payload_parameters) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local); @@ -1033,26 +1052,15 @@ impl ExecutionLayer { ), } return self - .get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + .get_full_payload_caching(payload_parameters) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local); } + let parent_hash = payload_parameters.parent_hash; let (relay_result, local_result) = self - .fetch_builder_and_local_payloads( - builder.as_ref(), - parent_hash, - &builder_params, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + .fetch_builder_and_local_payloads(builder.as_ref(), &builder_params, payload_parameters) .await; match (relay_result, local_result) { @@ -1117,14 +1125,9 @@ impl ExecutionLayer { ); // check relay payload validity - if let Err(reason) = verify_builder_bid( - &relay, - parent_hash, - payload_attributes, - Some(local.block_number()), - current_fork, - spec, - ) { + if let Err(reason) = + verify_builder_bid(&relay, payload_parameters, Some(local.block_number()), spec) + { // relay payload invalid -> return local metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, @@ -1201,14 +1204,7 @@ impl ExecutionLayer { "parent_hash" => ?parent_hash, ); - match verify_builder_bid( - &relay, - parent_hash, - payload_attributes, - None, - current_fork, - spec, - ) { + match verify_builder_bid(&relay, payload_parameters, None, spec) { Ok(()) => Ok(ProvenancedPayload::try_from(relay.data.message)?), Err(reason) => { metrics::inc_counter_vec( @@ -1233,32 +1229,28 @@ impl ExecutionLayer { /// Get a full payload and cache its result in the execution 
layer's payload cache. async fn get_full_payload_caching( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, ) -> Result, Error> { - self.get_full_payload_with( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - Self::cache_payload, - ) - .await + self.get_full_payload_with(payload_parameters, Self::cache_payload) + .await } async fn get_full_payload_with( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, cache_fn: fn( &ExecutionLayer, PayloadContentsRefTuple, ) -> Option>, ) -> Result, Error> { + let PayloadParameters { + parent_hash, + payload_attributes, + forkchoice_update_params, + current_fork, + .. + } = payload_parameters; + self.engine() .request(move |engine| async move { let payload_id = if let Some(id) = engine @@ -1857,6 +1849,23 @@ impl ExecutionLayer { } } + pub async fn get_blobs( + &self, + query: Vec, + ) -> Result>>, Error> { + let capabilities = self.get_engine_capabilities(None).await?; + + if capabilities.get_blobs_v1 { + self.engine() + .request(|engine| async move { engine.api.get_blobs(query).await }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } else { + Ok(vec![None; query.len()]) + } + } + pub async fn get_block_by_number( &self, query: BlockByNumberQuery<'_>, @@ -1966,6 +1975,10 @@ enum InvalidBuilderPayload { payload: Option, expected: Option, }, + GasLimitMismatch { + payload: u64, + expected: u64, + }, } impl fmt::Display for InvalidBuilderPayload { @@ -2004,19 +2017,51 @@ impl fmt::Display for InvalidBuilderPayload { opt_string(expected) ) } + InvalidBuilderPayload::GasLimitMismatch { payload, expected } => { + write!(f, "payload gas limit was {} not {}", payload, 
expected) + } } } } +/// Calculate the expected gas limit for a block. +pub fn expected_gas_limit( + parent_gas_limit: u64, + target_gas_limit: u64, + spec: &ChainSpec, +) -> Option { + // Calculate the maximum gas limit difference allowed safely + let max_gas_limit_difference = parent_gas_limit + .checked_div(spec.gas_limit_adjustment_factor) + .and_then(|result| result.checked_sub(1)) + .unwrap_or(0); + + // Adjust the gas limit safely + if target_gas_limit > parent_gas_limit { + let gas_diff = target_gas_limit.saturating_sub(parent_gas_limit); + parent_gas_limit.checked_add(std::cmp::min(gas_diff, max_gas_limit_difference)) + } else { + let gas_diff = parent_gas_limit.saturating_sub(target_gas_limit); + parent_gas_limit.checked_sub(std::cmp::min(gas_diff, max_gas_limit_difference)) + } +} + /// Perform some cursory, non-exhaustive validation of the bid returned from the builder. fn verify_builder_bid( bid: &ForkVersionedResponse>, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, + payload_parameters: PayloadParameters<'_>, block_number: Option, - current_fork: ForkName, spec: &ChainSpec, ) -> Result<(), Box> { + let PayloadParameters { + parent_hash, + payload_attributes, + current_fork, + parent_gas_limit, + proposer_gas_limit, + .. 
+ } = payload_parameters; + let is_signature_valid = bid.data.verify_signature(spec); let header = &bid.data.message.header(); @@ -2032,6 +2077,8 @@ fn verify_builder_bid( .cloned() .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); let payload_withdrawals_root = header.withdrawals_root().ok(); + let expected_gas_limit = proposer_gas_limit + .and_then(|target_gas_limit| expected_gas_limit(parent_gas_limit, target_gas_limit, spec)); if header.parent_hash() != parent_hash { Err(Box::new(InvalidBuilderPayload::ParentHash { @@ -2068,6 +2115,14 @@ fn verify_builder_bid( payload: payload_withdrawals_root, expected: expected_withdrawals_root, })) + } else if expected_gas_limit + .map(|gas_limit| header.gas_limit() != gas_limit) + .unwrap_or(false) + { + Err(Box::new(InvalidBuilderPayload::GasLimitMismatch { + payload: header.gas_limit(), + expected: expected_gas_limit.unwrap_or(0), + })) } else { Ok(()) } @@ -2120,6 +2175,27 @@ mod test { .await; } + #[tokio::test] + async fn test_expected_gas_limit() { + let spec = ChainSpec::mainnet(); + assert_eq!( + expected_gas_limit(30_000_000, 30_000_000, &spec), + Some(30_000_000) + ); + assert_eq!( + expected_gas_limit(30_000_000, 40_000_000, &spec), + Some(30_029_295) + ); + assert_eq!( + expected_gas_limit(30_029_295, 40_000_000, &spec), + Some(30_058_619) + ); + assert_eq!( + expected_gas_limit(30_058_619, 30_000_000, &spec), + Some(30_029_266) + ); + } + #[tokio::test] async fn test_forked_terminal_block() { let runtime = TestRuntime::default(); diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 184031af4d..ab1a22677f 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub const HIT: &str = "hit"; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs 
b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index fc01c6c579..f5e3e560ec 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -28,8 +28,8 @@ use super::DEFAULT_TERMINAL_BLOCK; const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); -const GAS_LIMIT: u64 = 16384; -const GAS_USED: u64 = GAS_LIMIT - 1; +pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; +const GAS_USED: u64 = DEFAULT_GAS_LIMIT - 1; #[derive(Clone, Debug, PartialEq)] #[allow(clippy::large_enum_variant)] // This struct is only for testing. @@ -38,6 +38,10 @@ pub enum Block { PoS(ExecutionPayload), } +pub fn mock_el_extra_data() -> types::VariableList { + "block gen was here".as_bytes().to_vec().into() +} + impl Block { pub fn block_number(&self) -> u64 { match self { @@ -67,6 +71,13 @@ impl Block { } } + pub fn gas_limit(&self) -> u64 { + match self { + Block::PoW(_) => DEFAULT_GAS_LIMIT, + Block::PoS(payload) => payload.gas_limit(), + } + } + pub fn as_execution_block(&self, total_difficulty: Uint256) -> ExecutionBlock { match self { Block::PoW(block) => ExecutionBlock { @@ -570,10 +581,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -587,10 +598,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was 
here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -603,10 +614,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -623,10 +634,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -642,10 +653,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 786ac9ad9c..9365024ffb 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -374,7 +374,7 @@ pub async fn handle_rpc( .into(), should_override_builder: false, // TODO(electra): add EL requests in mock el 
- requests: Default::default(), + execution_requests: Default::default(), }) .unwrap() } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 341daedbc8..879b54eb07 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,5 +1,5 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; -use crate::{Config, ExecutionLayer, PayloadAttributes}; +use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER}; use fork_choice::ForkchoiceUpdateParameters; @@ -54,6 +54,10 @@ impl Operation { } } +pub fn mock_builder_extra_data() -> types::VariableList { + "mock_builder".as_bytes().to_vec().into() +} + #[derive(Debug)] // We don't use the string value directly, but it's used in the Debug impl which is required by `warp::reject::Reject`. 
struct Custom(#[allow(dead_code)] String); @@ -72,6 +76,8 @@ pub trait BidStuff { fn set_withdrawals_root(&mut self, withdrawals_root: Hash256); fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature; + + fn stamp_payload(&mut self); } impl BidStuff for BuilderBid { @@ -203,6 +209,29 @@ impl BidStuff for BuilderBid { let message = self.signing_root(domain); sk.sign(message) } + + // this helps differentiate a builder block from a regular block + fn stamp_payload(&mut self) { + let extra_data = mock_builder_extra_data::(); + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Bellatrix(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + ExecutionPayloadHeaderRefMut::Electra(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + } + } } #[derive(Clone)] @@ -286,6 +315,7 @@ impl MockBuilder { while let Some(op) = guard.pop() { op.apply(bid); } + bid.stamp_payload(); } } @@ -413,11 +443,12 @@ pub fn serve( let block = head.data.message(); let head_block_root = block.tree_hash_root(); - let head_execution_hash = block + let head_execution_payload = block .body() .execution_payload() - .map_err(|_| reject("pre-merge block"))? 
- .block_hash(); + .map_err(|_| reject("pre-merge block"))?; + let head_execution_hash = head_execution_payload.block_hash(); + let head_gas_limit = head_execution_payload.gas_limit(); if head_execution_hash != parent_hash { return Err(reject("head mismatch")); } @@ -529,14 +560,24 @@ pub fn serve( finalized_hash: Some(finalized_execution_hash), }; + let proposer_gas_limit = builder + .val_registration_cache + .read() + .get(&pubkey) + .map(|v| v.message.gas_limit); + + let payload_parameters = PayloadParameters { + parent_hash: head_execution_hash, + parent_gas_limit: head_gas_limit, + proposer_gas_limit, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: fork, + }; + let payload_response_type = builder .el - .get_full_payload_caching( - head_execution_hash, - &payload_attributes, - forkchoice_update_params, - fork, - ) + .get_full_payload_caching(payload_parameters) .await .map_err(|_| reject("couldn't get payload"))?; @@ -648,8 +689,6 @@ pub fn serve( } }; - message.set_gas_limit(cached_data.gas_limit); - builder.apply_operations(&mut message); let mut signature = diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index a9f1313e46..48372a39be 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -90,6 +90,7 @@ impl MockExecutionLayer { }; let parent_hash = latest_execution_block.block_hash(); + let parent_gas_limit = latest_execution_block.gas_limit(); let block_number = latest_execution_block.block_number() + 1; let timestamp = block_number; let prev_randao = Hash256::from_low_u64_be(block_number); @@ -131,14 +132,20 @@ impl MockExecutionLayer { let payload_attributes = PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_parameters = PayloadParameters { + 
parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: ForkName::Bellatrix, + }; + let block_proposal_content_type = self .el .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::FullV2, @@ -171,14 +178,20 @@ impl MockExecutionLayer { let payload_attributes = PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: ForkName::Bellatrix, + }; + let block_proposal_content_type = self .el .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::BlindedV2, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index be99b38054..faf6d4ef0b 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -25,12 +25,13 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; +pub use execution_block_generator::DEFAULT_GAS_LIMIT; pub use execution_block_generator::{ generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, - static_valid_tx, Block, ExecutionBlockGenerator, + mock_el_extra_data, static_valid_tx, Block, ExecutionBlockGenerator, }; pub use hook::Hook; -pub use mock_builder::{MockBuilder, Operation}; +pub use mock_builder::{mock_builder_extra_data, MockBuilder, Operation}; pub use mock_execution_layer::MockExecutionLayer; pub 
const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; @@ -53,6 +54,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_v3: true, get_payload_v4: true, get_client_version_v1: true, + get_blobs_v1: true, }; pub static DEFAULT_CLIENT_VERSION: LazyLock = diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index f3779f0e4a..5d601008bc 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -6,49 +6,49 @@ edition = { workspace = true } autotests = false # using a single test binary compiles faster [dependencies] -warp = { workspace = true } -serde = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -types = { workspace = true } -hex = { workspace = true } beacon_chain = { workspace = true } -eth2 = { workspace = true } -slog = { workspace = true } -network = { workspace = true } -lighthouse_network = { workspace = true } -eth1 = { workspace = true } -state_processing = { workspace = true } -lighthouse_version = { workspace = true } -lighthouse_metrics = { workspace = true } -warp_utils = { workspace = true } -slot_clock = { workspace = true } -ethereum_ssz = { workspace = true } +beacon_processor = { workspace = true } bs58 = "0.4.0" -futures = { workspace = true } +bytes = { workspace = true } +directory = { workspace = true } +eth1 = { workspace = true } +eth2 = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } execution_layer = { workspace = true } -parking_lot = { workspace = true } -safe_arith = { workspace = true } -task_executor = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +lighthouse_network = { workspace = true } +lighthouse_version = { workspace = true } +logging = { workspace = true } lru = { workspace = true } -tree_hash = { workspace = true } +metrics = { workspace = true } +network = { workspace = true } +operation_pool = { workspace = 
true } +parking_lot = { workspace = true } +rand = { workspace = true } +safe_arith = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +state_processing = { workspace = true } +store = { workspace = true } sysinfo = { workspace = true } system_health = { path = "../../common/system_health" } -directory = { workspace = true } -logging = { workspace = true } -ethereum_serde_utils = { workspace = true } -operation_pool = { workspace = true } -sensitive_url = { workspace = true } -store = { workspace = true } -bytes = { workspace = true } -beacon_processor = { workspace = true } -rand = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } [dev-dependencies] -serde_json = { workspace = true } -proto_array = { workspace = true } genesis = { workspace = true } logging = { workspace = true } +proto_array = { workspace = true } +serde_json = { workspace = true } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 307584b82d..23d177da78 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2693,24 +2693,37 @@ pub fn serve( .and(warp::header::optional::("accept")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) + .and(log_filter.clone()) .then( |endpoint_version: EndpointVersion, state_id: StateId, accept_header: Option, task_spawner: TaskSpawner, - chain: Arc>| { + chain: Arc>, + log: Logger| { task_spawner.blocking_response_task(Priority::P1, move || match accept_header { Some(api_types::Accept::Ssz) => { // We can ignore the optimistic status for the "fork" since it's a // specification constant that doesn't change across competing heads of the // beacon chain. 
+ let t = std::time::Instant::now(); let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; + let timer = metrics::start_timer(&metrics::HTTP_API_STATE_SSZ_ENCODE_TIMES); + let response_bytes = state.as_ssz_bytes(); + drop(timer); + debug!( + log, + "HTTP state load"; + "total_time_ms" => t.elapsed().as_millis(), + "target_slot" => state.slot() + ); + Response::builder() .status(200) - .body(state.as_ssz_bytes().into()) + .body(response_bytes.into()) .map(|res: Response| add_ssz_content_type_header(res)) .map(|resp: warp::reply::Response| { add_consensus_version_header(resp, fork_name) @@ -3691,7 +3704,10 @@ pub fn serve( ); execution_layer - .update_proposer_preparation(current_epoch, &preparation_data) + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|data| (data, &None)), + ) .await; chain @@ -3749,7 +3765,7 @@ pub fn serve( let spec = &chain.spec; let (preparation_data, filtered_registration_data): ( - Vec, + Vec<(ProposerPreparationData, Option)>, Vec, ) = register_val_data .into_iter() @@ -3779,12 +3795,15 @@ pub fn serve( // Filter out validators who are not 'active' or 'pending'. is_active_or_pending.then_some({ ( - ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data - .message - .fee_recipient, - }, + ( + ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data + .message + .fee_recipient, + }, + Some(register_data.message.gas_limit), + ), register_data, ) }) @@ -3794,7 +3813,10 @@ pub fn serve( // Update the prepare beacon proposer cache based on this request. 
execution_layer - .update_proposer_preparation(current_epoch, &preparation_data) + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|(data, limit)| (data, limit)), + ) .await; // Call prepare beacon proposer blocking with the latest update in order to make diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 970eef8dd0..767931a747 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static HTTP_API_PATHS_TOTAL: LazyLock> = LazyLock::new(|| { @@ -39,3 +39,15 @@ pub static HTTP_API_BLOCK_GOSSIP_TIMES: LazyLock> = LazyLoc &["provenance"], ) }); +pub static HTTP_API_STATE_SSZ_ENCODE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "http_api_state_ssz_encode_times", + "Time to SSZ encode a BeaconState for a response", + ) +}); +pub static HTTP_API_STATE_ROOT_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "http_api_state_root_times", + "Time to load a state root for a request", + ) +}); diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index fceeb2dd23..b5aa23acf8 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,4 +1,5 @@ use crate::metrics; +use std::future::Future; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; @@ -13,9 +14,10 @@ use eth2::types::{ PublishBlockRequest, SignedBlockContents, }; use execution_layer::ProvenancedPayload; +use futures::TryFutureExt; use lighthouse_network::{NetworkGlobals, PubsubMessage}; use network::NetworkMessage; -use rand::seq::SliceRandom; +use rand::prelude::SliceRandom; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::marker::PhantomData; @@ -26,9 +28,8 @@ use 
tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BeaconBlockRef, BlobSidecar, BlobsList, BlockImportSource, - DataColumnSidecarList, DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, - FullPayload, FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, - SignedBlindedBeaconBlock, + DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, + FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, SignedBlindedBeaconBlock, }; use warp::http::StatusCode; use warp::{reply::Response, Rejection, Reply}; @@ -97,14 +98,9 @@ pub async fn publish_block>( }; let block = unverified_block.inner_block(); debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); - let malicious_withhold_count = chain.config.malicious_withhold_count; - let chain_cloned = chain.clone(); /* actually publish a block */ let publish_block_p2p = move |block: Arc>, - should_publish_block: bool, - blob_sidecars: Vec>>, - mut data_column_sidecars: DataColumnSidecarList, sender, log, seen_timestamp| @@ -120,53 +116,16 @@ pub async fn publish_block>( publish_delay, ); - let mut pubsub_messages = if should_publish_block { - info!( - log, - "Signed block published to network via HTTP API"; - "slot" => block.slot(), - "blobs_published" => blob_sidecars.len(), - "publish_delay_ms" => publish_delay.as_millis(), - ); - vec![PubsubMessage::BeaconBlock(block.clone())] - } else { - vec![] - }; + info!( + log, + "Signed block published to network via HTTP API"; + "slot" => block.slot(), + "publish_delay_ms" => publish_delay.as_millis(), + ); - match block.as_ref() { - SignedBeaconBlock::Base(_) - | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Bellatrix(_) - | SignedBeaconBlock::Capella(_) => { - crate::publish_pubsub_messages(&sender, pubsub_messages) - .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; - } - SignedBeaconBlock::Deneb(_) | 
SignedBeaconBlock::Electra(_) => { - for blob in blob_sidecars.into_iter() { - pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new((blob.index, blob)))); - } - if malicious_withhold_count > 0 { - let columns_to_keep = data_column_sidecars - .len() - .saturating_sub(malicious_withhold_count); - // Randomize columns before dropping the last malicious_withhold_count items - data_column_sidecars.shuffle(&mut rand::thread_rng()); - drop(data_column_sidecars.drain(columns_to_keep..)); - } + crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; - for data_col in data_column_sidecars { - let subnet = DataColumnSubnetId::from_column_index::( - data_col.index as usize, - &chain_cloned.spec, - ); - pubsub_messages.push(PubsubMessage::DataColumnSidecar(Box::new(( - subnet, data_col, - )))); - } - crate::publish_pubsub_messages(&sender, pubsub_messages) - .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; - } - }; Ok(()) }; @@ -174,145 +133,11 @@ pub async fn publish_block>( let slot = block.message().slot(); let sender_clone = network_tx.clone(); - // Convert blobs to either: - // - // 1. Blob sidecars if prior to peer DAS, or - // 2. Data column sidecars if post peer DAS. - let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); - - let (blob_sidecars, data_column_sidecars) = match unverified_blobs { - // Pre-PeerDAS: construct blob sidecars for the network. 
- Some((kzg_proofs, blobs)) if !peer_das_enabled => { - let blob_sidecars = kzg_proofs - .into_iter() - .zip(blobs) - .enumerate() - .map(|(i, (proof, unverified_blob))| { - let _timer = metrics::start_timer( - &beacon_chain::metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION, - ); - let blob_sidecar = - BlobSidecar::new(i, unverified_blob, &block, proof).map(Arc::new); - blob_sidecar.map_err(|e| { - error!( - log, - "Invalid blob - not publishing block"; - "error" => ?e, - "blob_index" => i, - "slot" => slot, - ); - warp_utils::reject::custom_bad_request(format!("{e:?}")) - }) - }) - .collect::, Rejection>>()?; - (blob_sidecars, vec![]) - } - // Post PeerDAS: construct data columns. - Some((_, blobs)) => { - // TODO(das): this is sub-optimal and should likely not be happening prior to gossip - // block publishing. - let data_column_sidecars = build_blob_data_column_sidecars(&chain, &block, blobs) - .map_err(|e| { - error!( - log, - "Invalid data column - not publishing block"; - "error" => ?e, - "slot" => slot - ); - warp_utils::reject::custom_bad_request(format!("{e:?}")) - })?; - (vec![], data_column_sidecars) - } - None => (vec![], vec![]), - }; + let build_sidecar_task_handle = + spawn_build_data_sidecar_task(chain.clone(), block.clone(), unverified_blobs, log.clone())?; // Gossip verify the block and blobs/data columns separately. let gossip_verified_block_result = unverified_block.into_gossip_verified_block(&chain); - let gossip_verified_blobs = blob_sidecars - .into_iter() - .map(|blob_sidecar| { - let gossip_verified_blob = - GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, &chain); - - match gossip_verified_blob { - Ok(blob) => Ok(Some(blob)), - Err(GossipBlobError::RepeatBlob { proposer, .. }) => { - // Log the error but do not abort publication, we may need to publish the block - // or some of the other blobs if the block & blobs are only partially published - // by the other publisher. 
- debug!( - log, - "Blob for publication already known"; - "blob_index" => blob_sidecar.index, - "slot" => slot, - "proposer" => proposer, - ); - Ok(None) - } - Err(e) => { - error!( - log, - "Blob for publication is gossip-invalid"; - "blob_index" => blob_sidecar.index, - "slot" => slot, - "error" => ?e, - ); - Err(warp_utils::reject::custom_bad_request(e.to_string())) - } - } - }) - .collect::, Rejection>>()?; - - let gossip_verified_data_columns = data_column_sidecars - .into_iter() - .map(|data_column_sidecar| { - let column_index = data_column_sidecar.index as usize; - let subnet = - DataColumnSubnetId::from_column_index::(column_index, &chain.spec); - let gossip_verified_column = - GossipVerifiedDataColumn::new(data_column_sidecar, subnet.into(), &chain); - - match gossip_verified_column { - Ok(blob) => Ok(Some(blob)), - Err(GossipDataColumnError::PriorKnown { proposer, .. }) => { - // Log the error but do not abort publication, we may need to publish the block - // or some of the other data columns if the block & data columns are only - // partially published by the other publisher. 
- debug!( - log, - "Data column for publication already known"; - "column_index" => column_index, - "slot" => slot, - "proposer" => proposer, - ); - Ok(None) - } - Err(e) => { - error!( - log, - "Data column for publication is gossip-invalid"; - "column_index" => column_index, - "slot" => slot, - "error" => ?e, - ); - Err(warp_utils::reject::custom_bad_request(format!("{e:?}"))) - } - } - }) - .collect::, Rejection>>()?; - - let publishable_blobs = gossip_verified_blobs - .iter() - .flatten() - .map(|b| b.clone_blob()) - .collect::>(); - - let publishable_data_columns = gossip_verified_data_columns - .iter() - .flatten() - .map(|b| b.clone_data_column()) - .collect::>(); - let block_root = block_root.unwrap_or_else(|| { gossip_verified_block_result.as_ref().map_or_else( |_| block.canonical_root(), @@ -321,12 +146,9 @@ pub async fn publish_block>( }); let should_publish_block = gossip_verified_block_result.is_ok(); - if let BroadcastValidation::Gossip = validation_level { + if BroadcastValidation::Gossip == validation_level && should_publish_block { publish_block_p2p( block.clone(), - should_publish_block, - publishable_blobs.clone(), - publishable_data_columns.clone(), sender_clone.clone(), log.clone(), seen_timestamp, @@ -337,38 +159,39 @@ pub async fn publish_block>( let publish_fn_completed = Arc::new(AtomicBool::new(false)); let block_to_publish = block.clone(); let publish_fn = || { - match validation_level { - BroadcastValidation::Gossip => (), - BroadcastValidation::Consensus => publish_block_p2p( - block_to_publish.clone(), - should_publish_block, - publishable_blobs.clone(), - publishable_data_columns.clone(), - sender_clone.clone(), - log.clone(), - seen_timestamp, - )?, - BroadcastValidation::ConsensusAndEquivocation => { - check_slashable(&chain, block_root, &block_to_publish, &log)?; - publish_block_p2p( + if should_publish_block { + match validation_level { + BroadcastValidation::Gossip => (), + BroadcastValidation::Consensus => publish_block_p2p( 
block_to_publish.clone(), - should_publish_block, - publishable_blobs.clone(), - publishable_data_columns.clone(), sender_clone.clone(), log.clone(), seen_timestamp, - )?; - } - }; + )?, + BroadcastValidation::ConsensusAndEquivocation => { + check_slashable(&chain, block_root, &block_to_publish, &log)?; + publish_block_p2p( + block_to_publish.clone(), + sender_clone.clone(), + log.clone(), + seen_timestamp, + )?; + } + }; + } + publish_fn_completed.store(true, Ordering::SeqCst); Ok(()) }; + // Wait for blobs/columns to get gossip verified before proceeding further as we need them for import. + let (gossip_verified_blobs, gossip_verified_columns) = build_sidecar_task_handle.await?; + for blob in gossip_verified_blobs.into_iter().flatten() { - // Importing the blobs could trigger block import and network publication in the case - // where the block was already seen on gossip. - if let Err(e) = Box::pin(chain.process_gossip_blob(blob, &publish_fn)).await { + publish_blob_sidecars(network_tx, &blob).map_err(|_| { + warp_utils::reject::custom_server_error("unable to publish blob sidecars".into()) + })?; + if let Err(e) = Box::pin(chain.process_gossip_blob(blob)).await { let msg = format!("Invalid blob: {e}"); return if let BroadcastValidation::Gossip = validation_level { Err(warp_utils::reject::broadcast_without_import(msg)) @@ -383,14 +206,12 @@ pub async fn publish_block>( } } - if gossip_verified_data_columns - .iter() - .map(Option::is_some) - .count() - > 0 - { + if gossip_verified_columns.iter().map(Option::is_some).count() > 0 { + publish_column_sidecars(network_tx, &gossip_verified_columns, &chain).map_err(|_| { + warp_utils::reject::custom_server_error("unable to publish data column sidecars".into()) + })?; let sampling_columns_indices = &network_globals.sampling_columns; - let sampling_columns = gossip_verified_data_columns + let sampling_columns = gossip_verified_columns .into_iter() .flatten() .filter(|data_column| 
sampling_columns_indices.contains(&data_column.index())) @@ -501,6 +322,224 @@ pub async fn publish_block>( } } +type BuildDataSidecarTaskResult = Result< + ( + Vec>>, + Vec>>, + ), + Rejection, +>; + +/// Convert blobs to either: +/// +/// 1. Blob sidecars if prior to peer DAS, or +/// 2. Data column sidecars if post peer DAS. +fn spawn_build_data_sidecar_task( + chain: Arc>, + block: Arc>>, + proofs_and_blobs: UnverifiedBlobs, + log: Logger, +) -> Result>, Rejection> { + chain + .clone() + .task_executor + .spawn_blocking_handle( + move || { + let Some((kzg_proofs, blobs)) = proofs_and_blobs else { + return Ok((vec![], vec![])); + }; + + let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); + if !peer_das_enabled { + // Pre-PeerDAS: construct blob sidecars for the network. + let gossip_verified_blobs = + build_gossip_verified_blobs(&chain, &block, blobs, kzg_proofs, &log)?; + Ok((gossip_verified_blobs, vec![])) + } else { + // Post PeerDAS: construct data columns. 
+ let gossip_verified_data_columns = + build_gossip_verified_data_columns(&chain, &block, blobs, &log)?; + Ok((vec![], gossip_verified_data_columns)) + } + }, + "build_data_sidecars", + ) + .ok_or(warp_utils::reject::custom_server_error( + "runtime shutdown".to_string(), + )) + .map(|r| { + r.map_err(|_| warp_utils::reject::custom_server_error("join error".to_string())) + .and_then(|output| async move { output }) + }) +} + +fn build_gossip_verified_data_columns( + chain: &BeaconChain, + block: &SignedBeaconBlock>, + blobs: BlobsList, + log: &Logger, +) -> Result>>, Rejection> { + let slot = block.slot(); + let data_column_sidecars = + build_blob_data_column_sidecars(chain, block, blobs).map_err(|e| { + error!( + log, + "Invalid data column - not publishing block"; + "error" => ?e, + "slot" => slot + ); + warp_utils::reject::custom_bad_request(format!("{e:?}")) + })?; + + let slot = block.slot(); + let gossip_verified_data_columns = data_column_sidecars + .into_iter() + .map(|data_column_sidecar| { + let column_index = data_column_sidecar.index as usize; + let subnet = + DataColumnSubnetId::from_column_index::(column_index, &chain.spec); + let gossip_verified_column = + GossipVerifiedDataColumn::new(data_column_sidecar, subnet.into(), chain); + + match gossip_verified_column { + Ok(blob) => Ok(Some(blob)), + Err(GossipDataColumnError::PriorKnown { proposer, .. }) => { + // Log the error but do not abort publication, we may need to publish the block + // or some of the other data columns if the block & data columns are only + // partially published by the other publisher. 
+ debug!( + log, + "Data column for publication already known"; + "column_index" => column_index, + "slot" => slot, + "proposer" => proposer, + ); + Ok(None) + } + Err(e) => { + error!( + log, + "Data column for publication is gossip-invalid"; + "column_index" => column_index, + "slot" => slot, + "error" => ?e, + ); + Err(warp_utils::reject::custom_bad_request(format!("{e:?}"))) + } + } + }) + .collect::, Rejection>>()?; + + Ok(gossip_verified_data_columns) +} + +fn build_gossip_verified_blobs( + chain: &BeaconChain, + block: &SignedBeaconBlock>, + blobs: BlobsList, + kzg_proofs: KzgProofs, + log: &Logger, +) -> Result>>, Rejection> { + let slot = block.slot(); + let gossip_verified_blobs = kzg_proofs + .into_iter() + .zip(blobs) + .enumerate() + .map(|(i, (proof, unverified_blob))| { + let timer = metrics::start_timer( + &beacon_chain::metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION, + ); + let blob_sidecar = BlobSidecar::new(i, unverified_blob, block, proof) + .map(Arc::new) + .map_err(|e| { + error!( + log, + "Invalid blob - not publishing block"; + "error" => ?e, + "blob_index" => i, + "slot" => slot, + ); + warp_utils::reject::custom_bad_request(format!("{e:?}")) + })?; + drop(timer); + + let gossip_verified_blob = + GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, chain); + + match gossip_verified_blob { + Ok(blob) => Ok(Some(blob)), + Err(GossipBlobError::RepeatBlob { proposer, .. }) => { + // Log the error but do not abort publication, we may need to publish the block + // or some of the other blobs if the block & blobs are only partially published + // by the other publisher. 
+ debug!( + log, + "Blob for publication already known"; + "blob_index" => blob_sidecar.index, + "slot" => slot, + "proposer" => proposer, + ); + Ok(None) + } + Err(e) => { + error!( + log, + "Blob for publication is gossip-invalid"; + "blob_index" => blob_sidecar.index, + "slot" => slot, + "error" => ?e, + ); + Err(warp_utils::reject::custom_bad_request(e.to_string())) + } + } + }) + .collect::, Rejection>>()?; + + Ok(gossip_verified_blobs) +} + +fn publish_column_sidecars( + sender_clone: &UnboundedSender>, + data_column_sidecars: &[Option>], + chain: &BeaconChain, +) -> Result<(), BlockError> { + let malicious_withhold_count = chain.config.malicious_withhold_count; + let mut data_column_sidecars = data_column_sidecars + .iter() + .flatten() + .map(|d| d.clone_data_column()) + .collect::>(); + if malicious_withhold_count > 0 { + let columns_to_keep = data_column_sidecars + .len() + .saturating_sub(malicious_withhold_count); + // Randomize columns before dropping the last malicious_withhold_count items + data_column_sidecars.shuffle(&mut rand::thread_rng()); + data_column_sidecars.truncate(columns_to_keep); + } + let pubsub_messages = data_column_sidecars + .into_iter() + .map(|data_col| { + let subnet = DataColumnSubnetId::from_column_index::( + data_col.index as usize, + &chain.spec, + ); + PubsubMessage::DataColumnSidecar(Box::new((subnet, data_col))) + }) + .collect::>(); + crate::publish_pubsub_messages(sender_clone, pubsub_messages) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) +} + +fn publish_blob_sidecars( + sender_clone: &UnboundedSender>, + blob: &GossipVerifiedBlob, +) -> Result<(), BlockError> { + let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob()))); + crate::publish_pubsub_message(sender_clone, pubsub_message) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) +} + async fn post_block_import_logging_and_response( result: Result, validation_level: 
BroadcastValidation, diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index fdc99fa954..ddacde9a3f 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -1,3 +1,4 @@ +use crate::metrics; use crate::ExecutionOptimistic; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::StateId as CoreStateId; @@ -23,6 +24,7 @@ impl StateId { &self, chain: &BeaconChain, ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> { + let _t = metrics::start_timer(&metrics::HTTP_API_STATE_ROOT_TIMES); let (slot, execution_optimistic, finalized) = match &self.0 { CoreStateId::Head => { let (cached_head, execution_status) = chain diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index f55983ec66..e1ecf2d4fc 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -322,7 +322,7 @@ pub async fn consensus_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn consensus_partial_pass_only_consensus() { /* this test targets gossip-level validation */ - let validation_level: Option = Some(BroadcastValidation::Consensus); + let validation_level = BroadcastValidation::Consensus; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. 
@@ -378,7 +378,7 @@ pub async fn consensus_partial_pass_only_consensus() { tester.harness.chain.clone(), &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -615,8 +615,7 @@ pub async fn equivocation_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ - let validation_level: Option = - Some(BroadcastValidation::ConsensusAndEquivocation); + let validation_level = BroadcastValidation::ConsensusAndEquivocation; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. @@ -671,7 +670,7 @@ pub async fn equivocation_consensus_late_equivocation() { tester.harness.chain, &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -1228,8 +1227,7 @@ pub async fn blinded_equivocation_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn blinded_equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ - let validation_level: Option = - Some(BroadcastValidation::ConsensusAndEquivocation); + let validation_level = BroadcastValidation::ConsensusAndEquivocation; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. 
@@ -1311,7 +1309,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { tester.harness.chain, &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -1465,8 +1463,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() { "need at least 2 blobs for partial reveal" ); - let partial_kzg_proofs = vec![blobs.0.get(0).unwrap().clone()]; - let partial_blobs = vec![blobs.1.get(0).unwrap().clone()]; + let partial_kzg_proofs = vec![*blobs.0.first().unwrap()]; + let partial_blobs = vec![blobs.1.first().unwrap().clone()]; // Simulate the block being seen on gossip. block @@ -1486,7 +1484,7 @@ pub async fn block_seen_on_gossip_with_some_blobs() { tester .harness .chain - .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .process_gossip_blob(gossip_blob) .await .unwrap(); } @@ -1559,7 +1557,7 @@ pub async fn blobs_seen_on_gossip_without_block() { tester .harness .chain - .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .process_gossip_blob(gossip_blob) .await .unwrap(); } @@ -1633,7 +1631,7 @@ pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { tester .harness .chain - .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .process_gossip_blob(gossip_blob) .await .unwrap(); } @@ -1705,7 +1703,7 @@ pub async fn slashable_blobs_seen_on_gossip_cause_failure() { tester .harness .chain - .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .process_gossip_blob(gossip_blob) .await .unwrap(); } diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index c3ed334782..e45dcf221c 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -139,7 +139,7 @@ impl ForkChoiceUpdates { fn insert(&mut self, update: ForkChoiceUpdateMetadata) { self.updates 
.entry(update.state.head_block_hash) - .or_insert_with(Vec::new) + .or_default() .push(update); } @@ -447,9 +447,14 @@ pub async fn proposer_boost_re_org_test( // Send proposer preparation data for all validators. let proposer_preparation_data = all_validators .iter() - .map(|i| ProposerPreparationData { - validator_index: *i as u64, - fee_recipient: Address::from_low_u64_be(*i as u64), + .map(|i| { + ( + ProposerPreparationData { + validator_index: *i as u64, + fee_recipient: Address::from_low_u64_be(*i as u64), + }, + None, + ) }) .collect::>(); harness @@ -459,7 +464,7 @@ pub async fn proposer_boost_re_org_test( .unwrap() .update_proposer_preparation( head_slot.epoch(E::slots_per_epoch()) + 1, - &proposer_preparation_data, + proposer_preparation_data.iter().map(|(a, b)| (a, b)), ) .await; diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 01731530d3..dd481f23ba 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -57,18 +57,18 @@ async fn el_syncing_then_synced() { mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); // EL synced mock_el.server.set_syncing_response(Ok(false)); mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `syncing` endpoint when the EL is offline (errors on upcheck). 
@@ -85,9 +85,9 @@ async fn el_offline() { mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, true); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `syncing` endpoint when the EL errors on newPaylod but is not fully offline. @@ -128,9 +128,9 @@ async fn el_error_on_new_payload() { // The EL should now be *offline* according to the API. let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, true); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); // Processing a block successfully should remove the status. mock_el.server.set_new_payload_status( @@ -144,9 +144,9 @@ async fn el_error_on_new_payload() { harness.process_block_result((block, blobs)).await.unwrap(); let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `node health` endpoint when the EL is offline. 
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 940f3ae9c0..7007a14466 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -13,8 +13,10 @@ use eth2::{ Error::ServerMessage, StatusCode, Timeouts, }; +use execution_layer::expected_gas_limit; use execution_layer::test_utils::{ - MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + mock_builder_extra_data, mock_el_extra_data, MockBuilder, Operation, + DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_GAS_LIMIT, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; @@ -272,10 +274,10 @@ impl ApiTester { let mock_builder_server = harness.set_mock_builder(beacon_url.clone()); // Start the mock builder service prior to building the chain out. - harness.runtime.task_executor.spawn( - async move { mock_builder_server.await }, - "mock_builder_server", - ); + harness + .runtime + .task_executor + .spawn(mock_builder_server, "mock_builder_server"); let mock_builder = harness.mock_builder.clone(); @@ -348,7 +350,6 @@ impl ApiTester { let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); - let log = test_logger(); let ApiServer { @@ -640,7 +641,7 @@ impl ApiTester { self } - pub async fn test_beacon_blocks_finalized(self) -> Self { + pub async fn test_beacon_blocks_finalized(self) -> Self { for block_id in self.interesting_block_ids() { let block_root = block_id.root(&self.chain); let block = block_id.full_block(&self.chain).await; @@ -677,7 +678,7 @@ impl ApiTester { self } - pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { + pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { for block_id in self.interesting_block_ids() { let block_root = block_id.root(&self.chain); let block = block_id.full_block(&self.chain).await; @@ -818,7 +819,7 @@ impl ApiTester { let 
validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let unsupported_media_response = self @@ -858,7 +859,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let validator_pubkey_ids = validator_indices .iter() @@ -909,7 +910,7 @@ impl ApiTester { for i in validator_indices { if i < state.balances().len() as u64 { validators.push(ValidatorBalanceData { - index: i as u64, + index: i, balance: *state.balances().get(i as usize).unwrap(), }); } @@ -943,7 +944,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let validator_pubkey_ids = validator_indices .iter() @@ -1011,7 +1012,7 @@ impl ApiTester { || statuses.contains(&status.superstatus()) { validators.push(ValidatorData { - index: i as u64, + index: i, balance: *state.balances().get(i as usize).unwrap(), status, validator, @@ -1640,11 +1641,7 @@ impl ApiTester { let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let num_blobs = block.num_expected_blobs(); let blob_indices = if use_indices { - Some( - (0..num_blobs.saturating_sub(1) as u64) - .into_iter() - .collect::>(), - ) + Some((0..num_blobs.saturating_sub(1) as u64).collect::>()) } else { None }; @@ -1662,7 +1659,7 @@ impl ApiTester { blob_indices.map_or(num_blobs, |indices| indices.len()) ); let expected = block.slot(); - assert_eq!(result.get(0).unwrap().slot(), expected); + assert_eq!(result.first().unwrap().slot(), expected); self } @@ -1700,9 +1697,9 @@ impl ApiTester { break; } } - let test_slot = test_slot.expect(&format!( - "should be able to find a block matching zero_blobs={zero_blobs}" - )); + let test_slot = test_slot.unwrap_or_else(|| { + panic!("should be able to find a block matching zero_blobs={zero_blobs}") + }); match self .client 
@@ -1771,7 +1768,6 @@ impl ApiTester { .attestations() .map(|att| att.clone_as_attestation()) .collect::>() - .into() }, ); @@ -1908,7 +1904,7 @@ impl ApiTester { let result = match self .client - .get_beacon_light_client_updates::(current_sync_committee_period as u64, 1) + .get_beacon_light_client_updates::(current_sync_committee_period, 1) .await { Ok(result) => result, @@ -1920,7 +1916,7 @@ impl ApiTester { .light_client_server_cache .get_light_client_updates( &self.chain.store, - current_sync_committee_period as u64, + current_sync_committee_period, 1, &self.chain.spec, ) @@ -2313,7 +2309,7 @@ impl ApiTester { .unwrap() .data .is_syncing; - assert_eq!(is_syncing, true); + assert!(is_syncing); // Reset sync state. *self @@ -2363,7 +2359,7 @@ impl ApiTester { pub async fn test_get_node_peers_by_id(self) -> Self { let result = self .client - .get_node_peers_by_id(self.external_peer_id.clone()) + .get_node_peers_by_id(self.external_peer_id) .await .unwrap() .data; @@ -3513,6 +3509,7 @@ impl ApiTester { self } + #[allow(clippy::await_holding_lock)] // This is a test, so it should be fine. pub async fn test_get_validator_aggregate_attestation(self) -> Self { if self .chain @@ -3755,7 +3752,11 @@ impl ApiTester { self } - pub async fn test_post_validator_register_validator(self) -> Self { + async fn generate_validator_registration_data( + &self, + fee_recipient_generator: impl Fn(usize) -> Address, + gas_limit: u64, + ) -> (Vec, Vec
) { let mut registrations = vec![]; let mut fee_recipients = vec![]; @@ -3766,15 +3767,13 @@ impl ApiTester { epoch: genesis_epoch, }; - let expected_gas_limit = 11_111_111; - for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { let pubkey = keypair.pk.compress(); - let fee_recipient = Address::from_low_u64_be(val_index as u64); + let fee_recipient = fee_recipient_generator(val_index); let data = ValidatorRegistrationData { fee_recipient, - gas_limit: expected_gas_limit, + gas_limit, timestamp: 0, pubkey, }; @@ -3797,6 +3796,17 @@ impl ApiTester { registrations.push(signed); } + (registrations, fee_recipients) + } + + pub async fn test_post_validator_register_validator(self) -> Self { + let (registrations, fee_recipients) = self + .generate_validator_registration_data( + |val_index| Address::from_low_u64_be(val_index as u64), + DEFAULT_GAS_LIMIT, + ) + .await; + self.client .post_validator_register_validator(®istrations) .await @@ -3811,14 +3821,22 @@ impl ApiTester { .zip(fee_recipients.into_iter()) .enumerate() { - let actual = self + let actual_fee_recipient = self .chain .execution_layer .as_ref() .unwrap() .get_suggested_fee_recipient(val_index as u64) .await; - assert_eq!(actual, fee_recipient); + let actual_gas_limit = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_proposer_gas_limit(val_index as u64) + .await; + assert_eq!(actual_fee_recipient, fee_recipient); + assert_eq!(actual_gas_limit, Some(DEFAULT_GAS_LIMIT)); } self @@ -3839,46 +3857,12 @@ impl ApiTester { ) .await; - let mut registrations = vec![]; - let mut fee_recipients = vec![]; - - let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch()); - let fork = Fork { - current_version: self.chain.spec.genesis_fork_version, - previous_version: self.chain.spec.genesis_fork_version, - epoch: genesis_epoch, - }; - - let expected_gas_limit = 11_111_111; - - for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { - let pubkey = 
keypair.pk.compress(); - let fee_recipient = Address::from_low_u64_be(val_index as u64); - - let data = ValidatorRegistrationData { - fee_recipient, - gas_limit: expected_gas_limit, - timestamp: 0, - pubkey, - }; - - let domain = self.chain.spec.get_domain( - genesis_epoch, - Domain::ApplicationMask(ApplicationDomain::Builder), - &fork, - Hash256::zero(), - ); - let message = data.signing_root(domain); - let signature = keypair.sk.sign(message); - - let signed = SignedValidatorRegistrationData { - message: data, - signature, - }; - - fee_recipients.push(fee_recipient); - registrations.push(signed); - } + let (registrations, fee_recipients) = self + .generate_validator_registration_data( + |val_index| Address::from_low_u64_be(val_index as u64), + DEFAULT_GAS_LIMIT, + ) + .await; self.client .post_validator_register_validator(®istrations) @@ -3911,6 +3895,47 @@ impl ApiTester { self } + pub async fn test_post_validator_register_validator_higher_gas_limit(&self) { + let (registrations, fee_recipients) = self + .generate_validator_registration_data( + |val_index| Address::from_low_u64_be(val_index as u64), + DEFAULT_GAS_LIMIT + 10_000_000, + ) + .await; + + self.client + .post_validator_register_validator(®istrations) + .await + .unwrap(); + + for (val_index, (_, fee_recipient)) in self + .chain + .head_snapshot() + .beacon_state + .validators() + .into_iter() + .zip(fee_recipients.into_iter()) + .enumerate() + { + let actual_fee_recipient = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_suggested_fee_recipient(val_index as u64) + .await; + let actual_gas_limit = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_proposer_gas_limit(val_index as u64) + .await; + assert_eq!(actual_fee_recipient, fee_recipient); + assert_eq!(actual_gas_limit, Some(DEFAULT_GAS_LIMIT + 10_000_000)); + } + } + pub async fn test_post_validator_liveness_epoch(self) -> Self { let epoch = self.chain.epoch().unwrap(); let head_state = 
self.chain.head_beacon_state_cloned(); @@ -4029,9 +4054,9 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); self } @@ -4056,9 +4081,10 @@ impl ApiTester { ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 16_384); + // This is the graffiti of the mock execution layer, not the builder. + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -4083,9 +4109,9 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); self } @@ -4107,9 +4133,9 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); // If this cache is empty, it indicates fallback was not used, so the payload came from the // mock builder. 
@@ -4126,10 +4152,16 @@ impl ApiTester { pub async fn test_payload_accepts_mutated_gas_limit(self) -> Self { // Mutate gas limit. + let builder_limit = expected_gas_limit( + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_LIMIT + 10_000_000, + self.chain.spec.as_ref(), + ) + .expect("calculate expected gas limit"); self.mock_builder .as_ref() .unwrap() - .add_operation(Operation::GasLimit(30_000_000)); + .add_operation(Operation::GasLimit(builder_limit as usize)); let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); @@ -4147,9 +4179,9 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 30_000_000); + assert_eq!(payload.gas_limit(), builder_limit); // This cache should not be populated because fallback should not have been used. assert!(self @@ -4159,6 +4191,49 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + + self + } + + pub async fn test_builder_payload_rejected_when_gas_limit_incorrect(self) -> Self { + self.test_post_validator_register_validator_higher_gas_limit() + .await; + + // Mutate gas limit. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::GasLimit(1)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4188,7 +4263,7 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), 30_000_000); @@ -4232,6 +4307,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -4315,6 +4393,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4404,6 +4485,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4491,6 +4575,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4577,6 +4664,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4647,6 +4737,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to 
check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4707,6 +4800,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4780,6 +4876,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); // Without proposing, advance into the next slot, this should make us cross the threshold // number of skips, causing us to use the fallback. @@ -4809,6 +4907,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -4915,6 +5015,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this // scenario starts at an epoch boundary). @@ -4954,6 +5056,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); self } @@ -5032,9 +5136,8 @@ impl ApiTester { pub async fn test_builder_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. 
- self.harness.mock_execution_layer.as_ref().map(|el| { + self.harness.mock_execution_layer.as_ref().inspect(|el| { el.server.all_payloads_syncing(true); - el }); self.harness .extend_chain( @@ -5061,7 +5164,7 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); // If this cache is populated, it indicates fallback to the local EE was correctly used. @@ -5072,15 +5175,16 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } pub async fn test_builder_v3_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. - self.harness.mock_execution_layer.as_ref().map(|el| { + self.harness.mock_execution_layer.as_ref().inspect(|el| { el.server.all_payloads_syncing(true); - el }); self.harness .extend_chain( @@ -5110,7 +5214,7 @@ impl ApiTester { ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); self @@ -5149,6 +5253,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -5214,6 +5321,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -5279,6 
+5389,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -5343,6 +5456,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -5979,16 +6095,17 @@ impl ApiTester { assert_eq!(result.execution_optimistic, Some(false)); // Change head to be optimistic. - self.chain + if let Some(head_node) = self + .chain .canonical_head .fork_choice_write_lock() .proto_array_mut() .core_proto_array_mut() .nodes .last_mut() - .map(|head_node| { - head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) - }); + { + head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) + } // Check responses are now optimistic. let result = self @@ -6021,8 +6138,8 @@ async fn poll_events, eth2::Error>> + Unpin }; tokio::select! 
{ - _ = collect_stream_fut => {events} - _ = tokio::time::sleep(timeout) => { return events; } + _ = collect_stream_fut => { events } + _ = tokio::time::sleep(timeout) => { events } } } @@ -6058,31 +6175,31 @@ async fn test_unsupported_media_response() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_get() { +async fn beacon_get_state_hashes() { + ApiTester::new() + .await + .test_beacon_states_root_finalized() + .await + .test_beacon_states_finality_checkpoints_finalized() + .await + .test_beacon_states_root() + .await + .test_beacon_states_finality_checkpoints() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_state_info() { ApiTester::new() .await .test_beacon_genesis() .await - .test_beacon_states_root_finalized() - .await .test_beacon_states_fork_finalized() .await - .test_beacon_states_finality_checkpoints_finalized() - .await - .test_beacon_headers_block_id_finalized() - .await - .test_beacon_blocks_finalized::() - .await - .test_beacon_blinded_blocks_finalized::() - .await .test_debug_beacon_states_finalized() .await - .test_beacon_states_root() - .await .test_beacon_states_fork() .await - .test_beacon_states_finality_checkpoints() - .await .test_beacon_states_validators() .await .test_beacon_states_validator_balances() @@ -6092,6 +6209,18 @@ async fn beacon_get() { .test_beacon_states_validator_id() .await .test_beacon_states_randao() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_blocks() { + ApiTester::new() + .await + .test_beacon_headers_block_id_finalized() + .await + .test_beacon_blocks_finalized() + .await + .test_beacon_blinded_blocks_finalized() .await .test_beacon_headers_all_slots() .await @@ -6106,6 +6235,12 @@ async fn beacon_get() { .test_beacon_blocks_attestations() .await .test_beacon_blocks_root() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_pools() { + 
ApiTester::new() .await .test_get_beacon_pool_attestations() .await @@ -6682,6 +6817,8 @@ async fn post_validator_register_valid_v3() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_gas_limit_mutation() { ApiTester::new_mev_tester() + .await + .test_builder_payload_rejected_when_gas_limit_incorrect() .await .test_payload_accepts_mutated_gas_limit() .await; diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index f835d13fb6..d92f986440 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -3,24 +3,23 @@ name = "http_metrics" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = { workspace = true } +beacon_chain = { workspace = true } +lighthouse_network = { workspace = true } +lighthouse_version = { workspace = true } +malloc_utils = { workspace = true } +metrics = { workspace = true } serde = { workspace = true } slog = { workspace = true } -beacon_chain = { workspace = true } -store = { workspace = true } -lighthouse_network = { workspace = true } slot_clock = { workspace = true } -lighthouse_metrics = { workspace = true } -lighthouse_version = { workspace = true } +store = { workspace = true } +warp = { workspace = true } warp_utils = { workspace = true } -malloc_utils = { workspace = true } [dev-dependencies] -tokio = { workspace = true } -reqwest = { workspace = true } -types = { workspace = true } logging = { workspace = true } +reqwest = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index d68efff432..d751c51e4c 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,8 +1,8 @@ use crate::Context; use 
beacon_chain::BeaconChainTypes; -use lighthouse_metrics::TextEncoder; use lighthouse_network::prometheus_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; +use metrics::TextEncoder; pub fn gather_prometheus_metrics( ctx: &Context, @@ -17,13 +17,13 @@ pub fn gather_prometheus_metrics( // - Statically updated: things which are only updated at the time of the scrape (used where we // can avoid cluttering up code with metrics calls). // - // The `lighthouse_metrics` crate has a `DEFAULT_REGISTRY` global singleton + // The `metrics` crate has a `DEFAULT_REGISTRY` global singleton // which keeps the state of all the metrics. Dynamically updated things will already be // up-to-date in the registry (because they update themselves) however statically updated // things need to be "scraped". // // We proceed by, first updating all the static metrics using `scrape_for_metrics(..)`. Then, - // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into + // using `metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into // a string that can be returned via HTTP. 
if let Some(beacon_chain) = ctx.chain.as_ref() { @@ -48,7 +48,7 @@ pub fn gather_prometheus_metrics( } encoder - .encode_utf8(&lighthouse_metrics::gather(), &mut buffer) + .encode_utf8(&metrics::gather(), &mut buffer) .unwrap(); // encode gossipsub metrics also if they exist if let Some(registry) = ctx.gossipsub_registry.as_ref() { diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index b0f5b9a5e1..485f32b37a 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,50 +5,49 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] -alloy-primitives = { workspace = true} +alloy-primitives = { workspace = true } +alloy-rlp = { workspace = true } +bytes = { workspace = true } +delay_map = { workspace = true } +directory = { workspace = true } +dirs = { workspace = true } discv5 = { workspace = true } -gossipsub = { workspace = true } -unsigned-varint = { version = "0.8", features = ["codec"] } -ssz_types = { workspace = true } -types = { workspace = true } -serde = { workspace = true } +either = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -slog = { workspace = true } -lighthouse_version = { workspace = true } -tokio = { workspace = true } -futures = { workspace = true } -error-chain = { workspace = true } -dirs = { workspace = true } fnv = { workspace = true } -lighthouse_metrics = { workspace = true } -smallvec = { workspace = true } -tokio-io-timeout = "1" +futures = { workspace = true } +gossipsub = { workspace = true } +hex = { workspace = true } +itertools = { workspace = true } +libp2p-mplex = "0.42" +lighthouse_version = { workspace = true } lru = { workspace = true } lru_cache = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } -sha2 = { workspace = true } -snap = { workspace = true } -hex = { workspace = true } -tokio-util = { workspace = true } 
-tiny-keccak = "2" -task_executor = { workspace = true } +prometheus-client = "0.22.0" rand = { workspace = true } -directory = { workspace = true } regex = { workspace = true } +serde = { workspace = true } +sha2 = { workspace = true } +slog = { workspace = true } +smallvec = { workspace = true } +snap = { workspace = true } +ssz_types = { workspace = true } strum = { workspace = true } superstruct = { workspace = true } -prometheus-client = "0.22.0" +task_executor = { workspace = true } +tiny-keccak = "2" +tokio = { workspace = true } +tokio-io-timeout = "1" +tokio-util = { workspace = true } +types = { workspace = true } +unsigned-varint = { version = "0.8", features = ["codec"] } unused_port = { workspace = true } -delay_map = { workspace = true } -bytes = { workspace = true } -either = { workspace = true } -itertools = { workspace = true } -alloy-rlp = { workspace = true } # Local dependencies void = "1.0.2" -libp2p-mplex = "0.42" [dependencies.libp2p] version = "0.54" @@ -56,13 +55,13 @@ default-features = false features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] [dev-dependencies] -slog-term = { workspace = true } -slog-async = { workspace = true } -tempfile = { workspace = true } -quickcheck = { workspace = true } -quickcheck_macros = { workspace = true } async-channel = { workspace = true } logging = { workspace = true } +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } +slog-async = { workspace = true } +slog-term = { workspace = true } +tempfile = { workspace = true } [features] libp2p-websocket = [] diff --git a/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md b/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md index 006eb20a70..aba85f6184 100644 --- a/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md +++ b/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md @@ -2,6 +2,9 @@ - Remove the beta tag from the v1.2 upgrade. 
See [PR 6344](https://github.com/sigp/lighthouse/pull/6344) +- Correct state inconsistencies with the mesh and connected peers due to the fanout mapping. + See [PR 6244](https://github.com/sigp/lighthouse/pull/6244) + - Implement IDONTWANT messages as per [spec](https://github.com/libp2p/specs/pull/548). See [PR 5422](https://github.com/sigp/lighthouse/pull/5422) diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index a01d60dae9..61f5730c08 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -10,7 +10,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [features] -wasm-bindgen = ["getrandom/js"] +wasm-bindgen = ["getrandom/js", "futures-timer/wasm-bindgen"] rsa = [] [dependencies] @@ -22,12 +22,12 @@ bytes = "1.5" either = "1.9" fnv = "1.0.7" futures = "0.3.30" -futures-ticker = "0.0.3" futures-timer = "3.0.2" getrandom = "0.2.12" -hashlink.workspace = true +hashlink = { workspace = true } hex_fmt = "0.3.0" libp2p = { version = "0.54", default-features = false } +prometheus-client = "0.22.0" quick-protobuf = "0.8" quick-protobuf-codec = "0.3" rand = "0.8" @@ -36,7 +36,6 @@ serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" tracing = "0.1.37" void = "1.0.2" -prometheus-client = "0.22.0" web-time = "1.1.0" [dev-dependencies] diff --git a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs index f83a24baaf..537d2319c2 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs @@ -48,8 +48,7 @@ pub(crate) struct BackoffStorage { impl BackoffStorage { fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize { - ((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos()) - as 
usize + d.as_nanos().div_ceil(heartbeat_interval.as_nanos()) as usize } pub(crate) fn new( diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index c50e76e7f2..c4e20e4397 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -29,8 +29,7 @@ use std::{ time::Duration, }; -use futures::StreamExt; -use futures_ticker::Ticker; +use futures::FutureExt; use hashlink::LinkedHashMap; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; @@ -74,6 +73,7 @@ use super::{ types::RpcOut, }; use super::{PublishError, SubscriptionError, TopicScoreParams, ValidationError}; +use futures_timer::Delay; use quick_protobuf::{MessageWrite, Writer}; use std::{cmp::Ordering::Equal, fmt::Debug}; @@ -301,7 +301,7 @@ pub struct Behaviour { mcache: MessageCache, /// Heartbeat interval stream. - heartbeat: Ticker, + heartbeat: Delay, /// Number of heartbeats since the beginning of time; this allows us to amortize some resource /// clean up -- eg backoff clean up. @@ -318,7 +318,7 @@ pub struct Behaviour { outbound_peers: HashSet, /// Stores optional peer score data together with thresholds and decay interval. - peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker)>, + peer_score: Option<(PeerScore, PeerScoreThresholds, Delay)>, /// Counts the number of `IHAVE` received from each peer since the last heartbeat. 
count_received_ihave: HashMap, @@ -466,10 +466,7 @@ where config.backoff_slack(), ), mcache: MessageCache::new(config.history_gossip(), config.history_length()), - heartbeat: Ticker::new_with_next( - config.heartbeat_interval(), - config.heartbeat_initial_delay(), - ), + heartbeat: Delay::new(config.heartbeat_interval() + config.heartbeat_initial_delay()), heartbeat_ticks: 0, px_peers: HashSet::new(), outbound_peers: HashSet::new(), @@ -682,9 +679,15 @@ where // Gossipsub peers None => { tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); + // `fanout_peers` is always non-empty if it's `Some`. + let fanout_peers = self + .fanout + .get(&topic_hash) + .map(|peers| if peers.is_empty() { None } else { Some(peers) }) + .unwrap_or(None); // If we have fanout peers add them to the map. - if self.fanout.contains_key(&topic_hash) { - for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { + if let Some(peers) = fanout_peers { + for peer in peers { recipient_peers.insert(*peer); } } else { @@ -764,7 +767,7 @@ where } } else { tracing::error!(peer_id = %peer_id, - "Could not PUBLISH, peer doesn't exist in connected peer list"); + "Could not send PUBLISH, peer doesn't exist in connected peer list"); } } @@ -776,6 +779,11 @@ where return Err(PublishError::AllQueuesFull(recipient_peers.len())); } + // Broadcast IDONTWANT messages + if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() { + self.send_idontwant(&raw_message, &msg_id, raw_message.source.as_ref()); + } + tracing::debug!(message=%msg_id, "Published message"); if let Some(metrics) = self.metrics.as_mut() { @@ -933,7 +941,7 @@ where return Err("Peer score set twice".into()); } - let interval = Ticker::new(params.decay_interval); + let interval = Delay::new(params.decay_interval); let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback); self.peer_score = Some((peer_score, threshold, interval)); Ok(()) @@ -1066,7 +1074,7 @@ where }); } else 
{ tracing::error!(peer = %peer_id, - "Could not GRAFT, peer doesn't exist in connected peer list"); + "Could not send GRAFT, peer doesn't exist in connected peer list"); } // If the peer did not previously exist in any mesh, inform the handler @@ -1165,7 +1173,7 @@ where peer.sender.prune(prune); } else { tracing::error!(peer = %peer_id, - "Could not PRUNE, peer doesn't exist in connected peer list"); + "Could not send PRUNE, peer doesn't exist in connected peer list"); } // If the peer did not previously exist in any mesh, inform the handler @@ -1203,7 +1211,7 @@ where } fn score_below_threshold_from_scores( - peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker)>, + peer_score: &Option<(PeerScore, PeerScoreThresholds, Delay)>, peer_id: &PeerId, threshold: impl Fn(&PeerScoreThresholds) -> f64, ) -> (bool, f64) { @@ -1344,7 +1352,7 @@ where } } else { tracing::error!(peer = %peer_id, - "Could not IWANT, peer doesn't exist in connected peer list"); + "Could not send IWANT, peer doesn't exist in connected peer list"); } } tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer"); @@ -1367,7 +1375,7 @@ where for id in iwant_msgs { // If we have it and the IHAVE count is not above the threshold, - // foward the message. + // forward the message. 
if let Some((msg, count)) = self .mcache .get_with_iwant_counts(&id, peer_id) @@ -1380,7 +1388,7 @@ where "IWANT: Peer has asked for message too many times; ignoring request" ); } else if let Some(peer) = &mut self.connected_peers.get_mut(peer_id) { - if peer.dont_send.get(&id).is_some() { + if peer.dont_send_received.get(&id).is_some() { tracing::debug!(%peer_id, message=%id, "Peer already sent IDONTWANT for this message"); continue; } @@ -1407,7 +1415,7 @@ where } } else { tracing::error!(peer = %peer_id, - "Could not IWANT, peer doesn't exist in connected peer list"); + "Could not send IWANT, peer doesn't exist in connected peer list"); } } } @@ -1812,6 +1820,15 @@ where // Calculate the message id on the transformed data. let msg_id = self.config.message_id(&message); + if let Some(metrics) = self.metrics.as_mut() { + if let Some(peer) = self.connected_peers.get_mut(propagation_source) { + // Record if we received a message that we already sent a IDONTWANT for to the peer + if peer.dont_send_sent.contains_key(&msg_id) { + metrics.register_idontwant_messages_ignored_per_topic(&raw_message.topic); + } + } + } + // Check the validity of the message // Peers get penalized if this message is invalid. We don't add it to the duplicate cache // and instead continually penalize peers that repeatedly send this message. @@ -1830,7 +1847,7 @@ where // Broadcast IDONTWANT messages if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() { - self.send_idontwant(&raw_message, &msg_id, propagation_source); + self.send_idontwant(&raw_message, &msg_id, Some(propagation_source)); } tracing::debug!( @@ -2050,8 +2067,11 @@ where } } - // remove unsubscribed peers from the mesh if it exists + // remove unsubscribed peers from the mesh and fanout if they exist there. 
for (peer_id, topic_hash) in unsubscribed_peers { + self.fanout + .get_mut(&topic_hash) + .map(|peers| peers.remove(&peer_id)); self.remove_peer_from_mesh(&peer_id, &topic_hash, None, false, Churn::Unsub); } @@ -2075,7 +2095,7 @@ where } } else { tracing::error!(peer = %propagation_source, - "Could not GRAFT, peer doesn't exist in connected peer list"); + "Could not send GRAFT, peer doesn't exist in connected peer list"); } // Notify the application of the subscriptions @@ -2093,9 +2113,12 @@ where fn apply_iwant_penalties(&mut self) { if let Some((peer_score, ..)) = &mut self.peer_score { for (peer, count) in self.gossip_promises.get_broken_promises() { - peer_score.add_penalty(&peer, count); - if let Some(metrics) = self.metrics.as_mut() { - metrics.register_score_penalty(Penalty::BrokenPromise); + // We do not apply penalties to nodes that have disconnected. + if self.connected_peers.contains_key(&peer) { + peer_score.add_penalty(&peer, count); + if let Some(metrics) = self.metrics.as_mut() { + metrics.register_score_penalty(Penalty::BrokenPromise); + } } } } @@ -2501,11 +2524,19 @@ where // Flush stale IDONTWANTs. for peer in self.connected_peers.values_mut() { - while let Some((_front, instant)) = peer.dont_send.front() { + while let Some((_front, instant)) = peer.dont_send_received.front() { if (*instant + IDONTWANT_TIMEOUT) >= Instant::now() { break; } else { - peer.dont_send.pop_front(); + peer.dont_send_received.pop_front(); + } + } + // If metrics are not enabled, this queue would be empty. 
+ while let Some((_front, instant)) = peer.dont_send_sent.front() { + if (*instant + IDONTWANT_TIMEOUT) >= Instant::now() { + break; + } else { + peer.dont_send_sent.pop_front(); } } } @@ -2590,7 +2621,7 @@ where } } else { tracing::error!(peer = %peer_id, - "Could not IHAVE, peer doesn't exist in connected peer list"); + "Could not send IHAVE, peer doesn't exist in connected peer list"); } } } @@ -2676,7 +2707,7 @@ where peer.sender.prune(prune); } else { tracing::error!(peer = %peer_id, - "Could not PRUNE, peer doesn't exist in connected peer list"); + "Could not send PRUNE, peer doesn't exist in connected peer list"); } // inform the handler @@ -2696,7 +2727,7 @@ where &mut self, message: &RawMessage, msg_id: &MessageId, - propagation_source: &PeerId, + propagation_source: Option<&PeerId>, ) { let Some(mesh_peers) = self.mesh.get(&message.topic) else { return; @@ -2707,14 +2738,14 @@ where let recipient_peers = mesh_peers .iter() .chain(iwant_peers.iter()) - .filter(|peer_id| { - *peer_id != propagation_source && Some(*peer_id) != message.source.as_ref() + .filter(|&peer_id| { + Some(peer_id) != propagation_source && Some(peer_id) != message.source.as_ref() }); for peer_id in recipient_peers { let Some(peer) = self.connected_peers.get_mut(peer_id) else { - tracing::error!(peer = %peer_id, - "Could not IDONTWANT, peer doesn't exist in connected peer list"); + // It can be the case that promises to disconnected peers appear here. In this case + // we simply ignore the peer-id. continue; }; @@ -2740,6 +2771,16 @@ where .entry(*peer_id) .or_default() .non_priority += 1; + return; + } + // IDONTWANT sent successfully. + if let Some(metrics) = self.metrics.as_mut() { + peer.dont_send_sent.insert(msg_id.clone(), Instant::now()); + // Don't exceed capacity. 
+ if peer.dont_send_sent.len() > IDONTWANT_CAP { + peer.dont_send_sent.pop_front(); + } + metrics.register_idontwant_messages_sent_per_topic(&message.topic); } } } @@ -2797,7 +2838,7 @@ where if !recipient_peers.is_empty() { for peer_id in recipient_peers.iter() { if let Some(peer) = self.connected_peers.get_mut(peer_id) { - if peer.dont_send.get(msg_id).is_some() { + if peer.dont_send_received.get(msg_id).is_some() { tracing::debug!(%peer_id, message=%msg_id, "Peer doesn't want message"); continue; } @@ -2979,7 +3020,7 @@ where } } else { tracing::error!(peer = %peer_id, - "Could not SUBSCRIBE, peer doesn't exist in connected peer list"); + "Could not send SUBSCRIBE, peer doesn't exist in connected peer list"); } } @@ -3151,7 +3192,8 @@ where connections: vec![], sender: RpcSender::new(self.config.connection_handler_queue_len()), topics: Default::default(), - dont_send: LinkedHashMap::new(), + dont_send_received: LinkedHashMap::new(), + dont_send_sent: LinkedHashMap::new(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3183,7 +3225,8 @@ where connections: vec![], sender: RpcSender::new(self.config.connection_handler_queue_len()), topics: Default::default(), - dont_send: LinkedHashMap::new(), + dont_send_received: LinkedHashMap::new(), + dont_send_sent: LinkedHashMap::new(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3355,10 +3398,10 @@ where metrics.register_idontwant_bytes(idontwant_size); } for message_id in message_ids { - peer.dont_send.insert(message_id, Instant::now()); + peer.dont_send_received.insert(message_id, Instant::now()); // Don't exceed capacity. 
- if peer.dont_send.len() > IDONTWANT_CAP { - peer.dont_send.pop_front(); + if peer.dont_send_received.len() > IDONTWANT_CAP { + peer.dont_send_received.pop_front(); } } } @@ -3387,14 +3430,16 @@ where } // update scores - if let Some((peer_score, _, interval)) = &mut self.peer_score { - while let Poll::Ready(Some(_)) = interval.poll_next_unpin(cx) { + if let Some((peer_score, _, delay)) = &mut self.peer_score { + if delay.poll_unpin(cx).is_ready() { peer_score.refresh_scores(); + delay.reset(peer_score.params.decay_interval); } } - while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) { + if self.heartbeat.poll_unpin(cx).is_ready() { self.heartbeat(); + self.heartbeat.reset(self.config.heartbeat_interval()); } Poll::Pending diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs index 62f026b568..90b8fe43fb 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs @@ -25,6 +25,7 @@ use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::types::RpcReceiver; use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic}; use byteorder::{BigEndian, ByteOrder}; +use futures::StreamExt; use libp2p::core::ConnectedPoint; use rand::Rng; use std::net::Ipv4Addr; @@ -238,7 +239,8 @@ where kind: kind.clone().unwrap_or(PeerKind::Floodsub), connections: vec![connection_id], topics: Default::default(), - dont_send: LinkedHashMap::new(), + dont_send_received: LinkedHashMap::new(), + dont_send_sent: LinkedHashMap::new(), sender, }, ); @@ -626,7 +628,8 @@ fn test_join() { kind: PeerKind::Floodsub, connections: vec![connection_id], topics: Default::default(), - dont_send: LinkedHashMap::new(), + dont_send_received: LinkedHashMap::new(), + dont_send_sent: LinkedHashMap::new(), sender, }, ); @@ -1023,7 +1026,8 @@ fn test_get_random_peers() { connections: 
vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: RpcSender::new(gs.config.connection_handler_queue_len()), - dont_send: LinkedHashMap::new(), + dont_send_sent: LinkedHashMap::new(), + dont_send_received: LinkedHashMap::new(), }, ); } @@ -5408,7 +5412,7 @@ fn doesnt_forward_idontwant() { .unwrap(); let message_id = gs.config.message_id(&message); let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); - peer.dont_send.insert(message_id, Instant::now()); + peer.dont_send_received.insert(message_id, Instant::now()); gs.handle_received_message(raw_message.clone(), &local_id); assert_eq!( @@ -5457,7 +5461,7 @@ fn parses_idontwant() { }, ); let peer = gs.connected_peers.get_mut(&peers[1]).unwrap(); - assert!(peer.dont_send.get(&message_id).is_some()); + assert!(peer.dont_send_received.get(&message_id).is_some()); } /// Test that a node clears stale IDONTWANT messages. @@ -5473,10 +5477,10 @@ fn clear_stale_idontwant() { .create_network(); let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); - peer.dont_send + peer.dont_send_received .insert(MessageId::new(&[1, 2, 3, 4]), Instant::now()); std::thread::sleep(Duration::from_secs(3)); gs.heartbeat(); let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); - assert!(peer.dont_send.is_empty()); + assert!(peer.dont_send_received.is_empty()); } diff --git a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs index a4ac389a74..d3ca6c299e 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs @@ -188,6 +188,12 @@ pub(crate) struct Metrics { /// The number of bytes we have received in every IDONTWANT control message. idontwant_bytes: Counter, + /// Number of IDONTWANT messages sent per topic. + idontwant_messages_sent_per_topic: Family, + + /// Number of full messages we received that we previously sent a IDONTWANT for. 
+ idontwant_messages_ignored_per_topic: Family, + /// The size of the priority queue. priority_queue_size: Histogram, /// The size of the non-priority queue. @@ -341,6 +347,18 @@ impl Metrics { metric }; + // IDONTWANT messages sent per topic + let idontwant_messages_sent_per_topic = register_family!( + "idonttwant_messages_sent_per_topic", + "Number of IDONTWANT messages sent per topic" + ); + + // IDONTWANTs which were ignored, and we still received the message per topic + let idontwant_messages_ignored_per_topic = register_family!( + "idontwant_messages_ignored_per_topic", + "IDONTWANT messages that were sent but we received the full message regardless" + ); + let idontwant_bytes = { let metric = Counter::default(); registry.register( @@ -405,6 +423,8 @@ impl Metrics { idontwant_msgs, idontwant_bytes, idontwant_msgs_ids, + idontwant_messages_sent_per_topic, + idontwant_messages_ignored_per_topic, priority_queue_size, non_priority_queue_size, } @@ -608,6 +628,20 @@ impl Metrics { self.idontwant_bytes.inc_by(bytes as u64); } + /// Register receiving an IDONTWANT control message for a given topic. + pub(crate) fn register_idontwant_messages_sent_per_topic(&mut self, topic: &TopicHash) { + self.idontwant_messages_sent_per_topic + .get_or_create(topic) + .inc(); + } + + /// Register receiving a message for an already sent IDONTWANT. + pub(crate) fn register_idontwant_messages_ignored_per_topic(&mut self, topic: &TopicHash) { + self.idontwant_messages_ignored_per_topic + .get_or_create(topic) + .inc(); + } + /// Register receiving an IDONTWANT msg for this topic. 
pub(crate) fn register_idontwant(&mut self, msgs: usize) { self.idontwant_msgs.inc(); diff --git a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs index fa02f06f69..ec6fe7bdb6 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs @@ -44,7 +44,7 @@ mod tests; const TIME_CACHE_DURATION: u64 = 120; pub(crate) struct PeerScore { - params: PeerScoreParams, + pub(crate) params: PeerScoreParams, /// The score parameters. peer_stats: HashMap, /// Tracking peers per IP. diff --git a/beacon_node/lighthouse_network/gossipsub/src/types.rs b/beacon_node/lighthouse_network/gossipsub/src/types.rs index d14a929374..f5dac380e3 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/types.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/types.rs @@ -123,8 +123,10 @@ pub(crate) struct PeerConnections { pub(crate) sender: RpcSender, /// Subscribed topics. pub(crate) topics: BTreeSet, - /// Don't send messages. - pub(crate) dont_send: LinkedHashMap, + /// IDONTWANT messages received from the peer. + pub(crate) dont_send_received: LinkedHashMap, + /// IDONTWANT messages we sent to the peer. + pub(crate) dont_send_sent: LinkedHashMap, } /// Describes the types of peers that can exist in the gossipsub context. 
diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index d70e50b1da..21f3dc830f 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -305,12 +305,12 @@ impl Default for Config { let discv5_config = discv5::ConfigBuilder::new(discv5_listen_config) .enable_packet_filter() .session_cache_capacity(5000) - .request_timeout(Duration::from_secs(1)) + .request_timeout(Duration::from_secs(2)) .query_peer_timeout(Duration::from_secs(2)) .query_timeout(Duration::from_secs(30)) .request_retries(1) .enr_peer_update_min(10) - .query_parallelism(5) + .query_parallelism(8) .disable_report_discovered_peers() .ip_limit() // limits /24 IP's in buckets. .incoming_bucket_limit(8) // half the bucket size diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index d57c67bacb..578bb52b51 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -8,8 +8,8 @@ pub mod enr_ext; // Allow external use of the lighthouse ENR builder use crate::service::TARGET_SUBNET_PEERS; -use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use crate::{metrics, ClearDialError}; +use crate::{Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5}; pub use enr::{build_enr, load_enr_from_disk, use_or_load_enr, CombinedKey, Eth2Enr}; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; @@ -205,7 +205,7 @@ impl Discovery { network_globals: Arc>, log: &slog::Logger, spec: &ChainSpec, - ) -> error::Result { + ) -> Result { let log = log.clone(); let enr_dir = match config.network_dir.to_str() { @@ -1052,10 +1052,6 @@ impl NetworkBehaviour for Discovery { discv5::Event::SocketUpdated(socket_addr) => { info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port()); 
metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); - // We have SOCKET_UPDATED messages. This occurs when discovery has a majority of - // users reporting an external port and our ENR gets updated. - // Which means we are able to do NAT traversal. - metrics::set_gauge_vec(&metrics::NAT_OPEN, &["discv5"], 1); // Discv5 will have updated our local ENR. We save the updated version // to disk. diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index ced803add8..2f8fd82c51 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -63,7 +63,7 @@ impl<'de> Deserialize<'de> for PeerIdSerialized { // A wrapper struct that prints a dial error nicely. struct ClearDialError<'a>(&'a DialError); -impl<'a> ClearDialError<'a> { +impl ClearDialError<'_> { fn most_inner_error(err: &(dyn std::error::Error)) -> &(dyn std::error::Error) { let mut current = err; while let Some(source) = current.source() { @@ -73,7 +73,7 @@ impl<'a> ClearDialError<'a> { } } -impl<'a> std::fmt::Display for ClearDialError<'a> { +impl std::fmt::Display for ClearDialError<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { match &self.0 { DialError::Transport(errors) => { @@ -101,7 +101,7 @@ impl<'a> std::fmt::Display for ClearDialError<'a> { } pub use crate::types::{ - error, Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, + Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, SubnetDiscovery, }; diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index c3f64a5a1f..cb9c007b91 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static NAT_OPEN: LazyLock> = LazyLock::new(|| { @@ -8,6 +8,7 @@ pub static 
NAT_OPEN: LazyLock> = LazyLock::new(|| { &["protocol"], ) }); + pub static ADDRESS_UPDATE_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_counter( "libp2p_address_update_total", @@ -212,4 +213,6 @@ pub fn scrape_discovery_metrics() { set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64); set_gauge_vec(&DISCOVERY_BYTES, &["inbound"], metrics.bytes_recv as i64); set_gauge_vec(&DISCOVERY_BYTES, &["outbound"], metrics.bytes_sent as i64); + set_gauge_vec(&NAT_OPEN, &["discv5_ipv4"], metrics.ipv4_contactable as i64); + set_gauge_vec(&NAT_OPEN, &["discv5_ipv6"], metrics.ipv6_contactable as i64); } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index c1e72d250f..4df2566dac 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -4,7 +4,7 @@ use crate::discovery::enr_ext::EnrExt; use crate::discovery::peer_id_to_node_id; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RpcErrorResponse}; use crate::service::TARGET_SUBNET_PEERS; -use crate::{error, metrics, Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery}; +use crate::{metrics, Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery}; use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; @@ -144,7 +144,7 @@ impl PeerManager { cfg: config::Config, network_globals: Arc>, log: &slog::Logger, - ) -> error::Result { + ) -> Result { let config::Config { discovery_enabled, metrics_enabled, diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index c40f78b4b0..9fd059df85 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -7,10 +7,12 @@ use futures::StreamExt; use libp2p::core::transport::PortUse; use 
libp2p::core::ConnectedPoint; use libp2p::identity::PeerId; +use libp2p::multiaddr::Protocol; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; use libp2p::swarm::{ConnectionDenied, ConnectionId, NetworkBehaviour, ToSwarm}; +pub use metrics::{set_gauge_vec, NAT_OPEN}; use slog::{debug, error, trace}; use types::EthSpec; @@ -139,10 +141,6 @@ impl NetworkBehaviour for PeerManager { debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %ClearDialError(error)); self.on_dial_failure(peer_id); } - FromSwarm::ExternalAddrConfirmed(_) => { - // We have an external address confirmed, means we are able to do NAT traversal. - metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p"], 1); - } _ => { // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release // notes more than compiler feedback @@ -160,8 +158,8 @@ impl NetworkBehaviour for PeerManager { ) -> Result<(), ConnectionDenied> { // get the IP address to verify it's not banned. let ip = match remote_addr.iter().next() { - Some(libp2p::multiaddr::Protocol::Ip6(ip)) => IpAddr::V6(ip), - Some(libp2p::multiaddr::Protocol::Ip4(ip)) => IpAddr::V4(ip), + Some(Protocol::Ip6(ip)) => IpAddr::V6(ip), + Some(Protocol::Ip4(ip)) => IpAddr::V4(ip), _ => { return Err(ConnectionDenied::new(format!( "Connection to peer rejected: invalid multiaddr: {remote_addr}" @@ -207,6 +205,14 @@ impl NetworkBehaviour for PeerManager { )); } + // We have an inbound connection, this is indicative of having our libp2p NAT ports open. 
We + // distinguish between ipv4 and ipv6 here: + match remote_addr.iter().next() { + Some(Protocol::Ip4(_)) => set_gauge_vec(&NAT_OPEN, &["libp2p_ipv4"], 1), + Some(Protocol::Ip6(_)) => set_gauge_vec(&NAT_OPEN, &["libp2p_ipv6"], 1), + _ => {} + } + Ok(ConnectionHandler) } diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 9bdecab70b..5d86936d41 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -682,10 +682,15 @@ fn handle_rpc_response( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), SupportedProtocol::BlobsByRangeV1 => match fork_name { - Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlobsByRange(Arc::new( - BlobSidecar::from_ssz_bytes(decoded_buffer)?, - )))), - Some(_) => Err(RPCError::ErrorResponse( + Some(ForkName::Deneb) | Some(ForkName::Electra) => { + Ok(Some(RpcSuccessResponse::BlobsByRange(Arc::new( + BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } + Some(ForkName::Base) + | Some(ForkName::Altair) + | Some(ForkName::Bellatrix) + | Some(ForkName::Capella) => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, "Invalid fork name for blobs by range".to_string(), )), @@ -698,10 +703,15 @@ fn handle_rpc_response( )), }, SupportedProtocol::BlobsByRootV1 => match fork_name { - Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlobsByRoot(Arc::new( - BlobSidecar::from_ssz_bytes(decoded_buffer)?, - )))), - Some(_) => Err(RPCError::ErrorResponse( + Some(ForkName::Deneb) | Some(ForkName::Electra) => { + Ok(Some(RpcSuccessResponse::BlobsByRoot(Arc::new( + BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } + Some(ForkName::Base) + | Some(ForkName::Altair) + | Some(ForkName::Bellatrix) + | Some(ForkName::Capella) => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, "Invalid fork name for blobs by root".to_string(), )), @@ -1376,6 +1386,16 @@ mod tests 
{ Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), ); + assert_eq!( + encode_then_decode_response( + SupportedProtocol::BlobsByRangeV1, + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), + ForkName::Electra, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, @@ -1386,6 +1406,16 @@ mod tests { Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), ); + assert_eq!( + encode_then_decode_response( + SupportedProtocol::BlobsByRootV1, + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), + ForkName::Electra, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, @@ -1400,6 +1430,20 @@ mod tests { ))), ); + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRangeV1, + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( + empty_data_column_sidecar() + )), + ForkName::Electra, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::DataColumnsByRange( + empty_data_column_sidecar() + ))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, @@ -1413,6 +1457,20 @@ mod tests { empty_data_column_sidecar() ))), ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRootV1, + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( + empty_data_column_sidecar() + )), + ForkName::Electra, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::DataColumnsByRoot( + empty_data_column_sidecar() + ))), + ); } // Test RPCResponse encoding/decoding for V1 messages diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 42ece6dc4f..7b3a59eac7 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -104,15 +104,14 @@ impl RateLimiterConfig { pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); - pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(1024, 10); + // The number is chosen to balance between upload bandwidth required to serve + // blocks and a decent syncing rate for honest nodes. Malicious nodes would need to + // spread out their requests over the time window to max out bandwidth on the server. + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(128, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); - // `BlocksByRange` and `BlobsByRange` are sent together during range sync. - // It makes sense for blocks and blobs quotas to be equivalent in terms of the number of blocks: - // 1024 blocks * 6 max blobs per block. - // This doesn't necessarily mean that we are sending this many blobs, because the quotas are - // measured against the maximum request size. - pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(6144, 10); - pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(768, 10); + // `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(512, 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); // 320 blocks worth of columns for regular node, or 40 blocks for supernode. // Range sync load balances when requesting blocks, and each batch is 32 blocks. 
pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index e76d6d2786..0a0a6ca754 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -964,6 +964,9 @@ where request_info: (Id, RequestType), error: StreamUpgradeError, ) { + // This dialing is now considered failed + self.dial_negotiated -= 1; + let (id, req) = request_info; // map the error @@ -989,9 +992,6 @@ where StreamUpgradeError::Apply(other) => other, }; - // This dialing is now considered failed - self.dial_negotiated -= 1; - self.outbound_io_error_retries = 0; self.events_out .push(HandlerEvent::Err(HandlerErr::Outbound { diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 54b9447e2b..e4e8232e18 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -18,11 +18,11 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockElectra, - BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, - LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, + BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, EthSpecId, ForkContext, + ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, - LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, Signature, - SignedBeaconBlock, + LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, + Signature, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -105,6 +105,12 @@ pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX: 
LazyLock = LazyLock::new(|| { + ssz::BYTES_PER_LENGTH_OFFSET }); // Length offset for the blob commitments field. +pub static BLOB_SIDECAR_SIZE: LazyLock = + LazyLock::new(BlobSidecar::::max_size); + +pub static BLOB_SIDECAR_SIZE_MINIMAL: LazyLock = + LazyLock::new(BlobSidecar::::max_size); + pub static ERROR_TYPE_MIN: LazyLock = LazyLock::new(|| { VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -668,10 +674,14 @@ impl ProtocolId { } pub fn rpc_blob_limits() -> RpcLimits { - RpcLimits::new( - BlobSidecar::::empty().as_ssz_bytes().len(), - BlobSidecar::::max_size(), - ) + match E::spec_name() { + EthSpecId::Minimal => { + RpcLimits::new(*BLOB_SIDECAR_SIZE_MINIMAL, *BLOB_SIDECAR_SIZE_MINIMAL) + } + EthSpecId::Mainnet | EthSpecId::Gnosis => { + RpcLimits::new(*BLOB_SIDECAR_SIZE, *BLOB_SIDECAR_SIZE) + } + } } pub fn rpc_data_column_limits() -> RpcLimits { @@ -791,7 +801,7 @@ impl RequestType { RequestType::LightClientBootstrap(_) => 1, RequestType::LightClientOptimisticUpdate => 1, RequestType::LightClientFinalityUpdate => 1, - RequestType::LightClientUpdatesByRange(req) => req.max_requested(), + RequestType::LightClientUpdatesByRange(req) => req.count, } } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index cb22815390..85fabbb0c3 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -67,7 +67,6 @@ pub struct SamplingRequestId(pub usize); #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub struct CustodyId { pub requester: CustodyRequester, - pub req_id: Id, } /// Downstream components that perform custody by root requests. 
diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 0ad31ff2e8..e46c69dc71 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -250,18 +250,17 @@ impl futures::stream::Stream for GossipCache { Poll::Ready(Some(expired)) => { let expected_key = expired.key(); let (topic, data) = expired.into_inner(); - match self.topic_msgs.get_mut(&topic) { - Some(msgs) => { - let key = msgs.remove(&data); - debug_assert_eq!(key, Some(expected_key)); - if msgs.is_empty() { - // no more messages for this topic. - self.topic_msgs.remove(&topic); - } - } - None => { - #[cfg(debug_assertions)] - panic!("Topic for registered message is not present.") + let topic_msg = self.topic_msgs.get_mut(&topic); + debug_assert!( + topic_msg.is_some(), + "Topic for registered message is not present." + ); + if let Some(msgs) = topic_msg { + let key = msgs.remove(&data); + debug_assert_eq!(key, Some(expected_key)); + if msgs.is_empty() { + // no more messages for this topic. 
+ self.topic_msgs.remove(&topic); } } Poll::Ready(Some(Ok(topic))) diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index c6a764bb0e..6fffd649f5 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -1,5 +1,5 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; -use crate::{error, TopicHash}; +use crate::TopicHash; use gossipsub::{IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams}; use std::cmp::max; use std::collections::HashMap; @@ -84,7 +84,7 @@ impl PeerScoreSettings { thresholds: &PeerScoreThresholds, enr_fork_id: &EnrForkId, current_slot: Slot, - ) -> error::Result { + ) -> Result { let mut params = PeerScoreParams { decay_interval: self.decay_interval, decay_to_zero: self.decay_to_zero, @@ -175,7 +175,7 @@ impl PeerScoreSettings { &self, active_validators: usize, current_slot: Slot, - ) -> error::Result<(TopicScoreParams, TopicScoreParams, TopicScoreParams)> { + ) -> Result<(TopicScoreParams, TopicScoreParams, TopicScoreParams), String> { let (aggregators_per_slot, committees_per_slot) = self.expected_aggregator_count_per_slot(active_validators)?; let multiple_bursts_per_subnet_per_epoch = @@ -256,7 +256,7 @@ impl PeerScoreSettings { fn expected_aggregator_count_per_slot( &self, active_validators: usize, - ) -> error::Result<(f64, usize)> { + ) -> Result<(f64, usize), String> { let committees_per_slot = E::get_committee_count_per_slot_with( active_validators, self.max_committees_per_slot, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 056b6be24d..afcbfce173 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -20,7 +20,7 @@ use crate::types::{ }; use 
crate::EnrExt; use crate::Eth2Enr; -use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; +use crate::{metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use api_types::{AppRequestId, PeerRequestId, RequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ @@ -37,10 +37,8 @@ use slog::{crit, debug, info, o, trace, warn}; use std::num::{NonZeroU8, NonZeroUsize}; use std::path::PathBuf; use std::pin::Pin; -use std::{ - sync::Arc, - task::{Context, Poll}, -}; +use std::sync::Arc; +use std::time::Duration; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; @@ -173,7 +171,7 @@ impl Network { executor: task_executor::TaskExecutor, mut ctx: ServiceContext<'_>, log: &slog::Logger, - ) -> error::Result<(Self, Arc>)> { + ) -> Result<(Self, Arc>), String> { let log = log.new(o!("service"=> "libp2p")); let config = ctx.config.clone(); @@ -469,6 +467,8 @@ impl Network { let config = libp2p::swarm::Config::with_executor(Executor(executor)) .with_notify_handler_buffer_size(NonZeroUsize::new(7).expect("Not zero")) .with_per_connection_event_buffer_size(4) + .with_idle_connection_timeout(Duration::from_secs(10)) // Other clients can timeout + // during negotiation .with_dial_concurrency_factor(NonZeroU8::new(1).unwrap()); let builder = SwarmBuilder::with_existing_identity(local_keypair) @@ -518,7 +518,7 @@ impl Network { /// - Starts listening in the given ports. /// - Dials boot-nodes and libp2p peers. /// - Subscribes to starting gossipsub topics. 
- async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { + async fn start(&mut self, config: &crate::NetworkConfig) -> Result<(), String> { let enr = self.network_globals.local_enr(); info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); debug!(self.log, "Attempting to open listening ports"; config.listen_addrs(), "discovery_enabled" => !config.disable_discovery, "quic_enabled" => !config.disable_quic_support); @@ -923,7 +923,7 @@ impl Network { &mut self, active_validators: usize, current_slot: Slot, - ) -> error::Result<()> { + ) -> Result<(), String> { let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) = self.score_settings .get_dynamic_topic_params(active_validators, current_slot)?; @@ -1794,157 +1794,148 @@ impl Network { /* Networking polling */ - /// Poll the p2p networking stack. - /// - /// This will poll the swarm and do maintenance routines. - pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { - while let Poll::Ready(Some(swarm_event)) = self.swarm.poll_next_unpin(cx) { - let maybe_event = match swarm_event { - SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { - // Handle sub-behaviour events. - BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), - BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), - // Inform the peer manager about discovered peers. - // - // The peer manager will subsequently decide which peers need to be dialed and then dial - // them. - BehaviourEvent::Discovery(DiscoveredPeers { peers }) => { - self.peer_manager_mut().peers_discovered(peers); - None + pub async fn next_event(&mut self) -> NetworkEvent { + loop { + tokio::select! { + // Poll the libp2p `Swarm`. + // This will poll the swarm and do maintenance routines. 
+ Some(event) = self.swarm.next() => { + if let Some(event) = self.parse_swarm_event(event) { + return event; } - BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), - BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), - BehaviourEvent::Upnp(e) => { - self.inject_upnp_event(e); - None - } - #[allow(unreachable_patterns)] - BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), }, - SwarmEvent::ConnectionEstablished { .. } => None, - SwarmEvent::ConnectionClosed { .. } => None, - SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - connection_id: _, - } => { - trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); - None + + // perform gossipsub score updates when necessary + _ = self.update_gossipsub_scores.tick() => { + let this = self.swarm.behaviour_mut(); + this.peer_manager.update_gossipsub_scores(&this.gossipsub); } - SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - connection_id: _, - } => { - let error_repr = match error { - libp2p::swarm::ListenError::Aborted => { - "Incoming connection aborted".to_string() + // poll the gossipsub cache to clear expired messages + Some(result) = self.gossip_cache.next() => { + match result { + Err(e) => warn!(self.log, "Gossip cache error"; "error" => e), + Ok(expired_topic) => { + if let Some(v) = metrics::get_int_counter( + &metrics::GOSSIP_EXPIRED_LATE_PUBLISH_PER_TOPIC_KIND, + &[expired_topic.kind().as_ref()], + ) { + v.inc() + }; } - libp2p::swarm::ListenError::WrongPeerId { obtained, endpoint } => { - format!("Wrong peer id, obtained {obtained}, endpoint {endpoint:?}") - } - libp2p::swarm::ListenError::LocalPeerId { endpoint } => { - format!("Dialing local peer id {endpoint:?}") - } - libp2p::swarm::ListenError::Denied { cause } => { - format!("Connection was denied with cause: {cause:?}") - } - libp2p::swarm::ListenError::Transport(t) => match t { - libp2p::TransportError::MultiaddrNotSupported(m) => { 
- format!("Transport error: Multiaddr not supported: {m}") - } - libp2p::TransportError::Other(e) => { - format!("Transport error: other: {e}") - } - }, - }; - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr); - None - } - SwarmEvent::OutgoingConnectionError { - peer_id: _, - error: _, - connection_id: _, - } => { - // The Behaviour event is more general than the swarm event here. It includes - // connection failures. So we use that log for now, in the peer manager - // behaviour implementation. - None - } - SwarmEvent::NewListenAddr { address, .. } => { - Some(NetworkEvent::NewListenAddr(address)) - } - SwarmEvent::ExpiredListenAddr { address, .. } => { - debug!(self.log, "Listen address expired"; "address" => %address); - None - } - SwarmEvent::ListenerClosed { - addresses, reason, .. - } => { - match reason { - Ok(_) => { - debug!(self.log, "Listener gracefully closed"; "addresses" => ?addresses) - } - Err(reason) => { - crit!(self.log, "Listener abruptly closed"; "addresses" => ?addresses, "reason" => ?reason) - } - }; - if Swarm::listeners(&self.swarm).count() == 0 { - Some(NetworkEvent::ZeroListeners) - } else { - None } } - SwarmEvent::ListenerError { error, .. } => { - // Ignore quic accept and close errors. 
- if let Some(error) = error - .get_ref() - .and_then(|err| err.downcast_ref::()) - .filter(|err| matches!(err, libp2p::quic::Error::Connection(_))) - { - debug!(self.log, "Listener closed quic connection"; "reason" => ?error); - } else { - warn!(self.log, "Listener error"; "error" => ?error); - } - None - } - _ => { - // NOTE: SwarmEvent is a non exhaustive enum so updates should be based on - // release notes more than compiler feedback - None - } - }; - - if let Some(ev) = maybe_event { - return Poll::Ready(ev); - } - } - - // perform gossipsub score updates when necessary - while self.update_gossipsub_scores.poll_tick(cx).is_ready() { - let this = self.swarm.behaviour_mut(); - this.peer_manager.update_gossipsub_scores(&this.gossipsub); - } - - // poll the gossipsub cache to clear expired messages - while let Poll::Ready(Some(result)) = self.gossip_cache.poll_next_unpin(cx) { - match result { - Err(e) => warn!(self.log, "Gossip cache error"; "error" => e), - Ok(expired_topic) => { - if let Some(v) = metrics::get_int_counter( - &metrics::GOSSIP_EXPIRED_LATE_PUBLISH_PER_TOPIC_KIND, - &[expired_topic.kind().as_ref()], - ) { - v.inc() - }; - } } } - Poll::Pending } - pub async fn next_event(&mut self) -> NetworkEvent { - futures::future::poll_fn(|cx| self.poll_network(cx)).await + fn parse_swarm_event( + &mut self, + event: SwarmEvent>, + ) -> Option> { + match event { + SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { + // Handle sub-behaviour events. + BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), + BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), + // Inform the peer manager about discovered peers. + // + // The peer manager will subsequently decide which peers need to be dialed and then dial + // them. 
+ BehaviourEvent::Discovery(DiscoveredPeers { peers }) => { + self.peer_manager_mut().peers_discovered(peers); + None + } + BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), + BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + BehaviourEvent::Upnp(e) => { + self.inject_upnp_event(e); + None + } + #[allow(unreachable_patterns)] + BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), + }, + SwarmEvent::ConnectionEstablished { .. } => None, + SwarmEvent::ConnectionClosed { .. } => None, + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + connection_id: _, + } => { + trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); + None + } + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + connection_id: _, + } => { + let error_repr = match error { + libp2p::swarm::ListenError::Aborted => { + "Incoming connection aborted".to_string() + } + libp2p::swarm::ListenError::WrongPeerId { obtained, endpoint } => { + format!("Wrong peer id, obtained {obtained}, endpoint {endpoint:?}") + } + libp2p::swarm::ListenError::LocalPeerId { endpoint } => { + format!("Dialing local peer id {endpoint:?}") + } + libp2p::swarm::ListenError::Denied { cause } => { + format!("Connection was denied with cause: {cause:?}") + } + libp2p::swarm::ListenError::Transport(t) => match t { + libp2p::TransportError::MultiaddrNotSupported(m) => { + format!("Transport error: Multiaddr not supported: {m}") + } + libp2p::TransportError::Other(e) => { + format!("Transport error: other: {e}") + } + }, + }; + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr); + None + } + SwarmEvent::OutgoingConnectionError { + peer_id: _, + error: _, + connection_id: _, + } => { + // The Behaviour event is more general than the swarm event here. It includes + // connection failures. 
So we use that log for now, in the peer manager + // behaviour implementation. + None + } + SwarmEvent::NewListenAddr { address, .. } => Some(NetworkEvent::NewListenAddr(address)), + SwarmEvent::ExpiredListenAddr { address, .. } => { + debug!(self.log, "Listen address expired"; "address" => %address); + None + } + SwarmEvent::ListenerClosed { + addresses, reason, .. + } => { + match reason { + Ok(_) => { + debug!(self.log, "Listener gracefully closed"; "addresses" => ?addresses) + } + Err(reason) => { + crit!(self.log, "Listener abruptly closed"; "addresses" => ?addresses, "reason" => ?reason) + } + }; + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { + None + } + } + SwarmEvent::ListenerError { error, .. } => { + debug!(self.log, "Listener closed connection attempt"; "reason" => ?error); + None + } + _ => { + // NOTE: SwarmEvent is a non exhaustive enum so updates should be based on + // release notes more than compiler feedback + None + } + } } } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index f4988e68cd..490928c08c 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -1,9 +1,7 @@ use crate::multiaddr::Protocol; use crate::rpc::methods::MetaDataV3; use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; -use crate::types::{ - error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, -}; +use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind}; use crate::{GossipTopic, NetworkConfig}; use futures::future::Either; use gossipsub; @@ -83,7 +81,7 @@ pub fn build_transport( // Useful helper functions for debugging. Currently not used in the client. 
#[allow(dead_code)] -fn keypair_from_hex(hex_bytes: &str) -> error::Result { +fn keypair_from_hex(hex_bytes: &str) -> Result { let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") { stripped.to_string() } else { @@ -91,18 +89,18 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result { }; hex::decode(hex_bytes) - .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) + .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e)) .and_then(keypair_from_bytes) } #[allow(dead_code)] -fn keypair_from_bytes(mut bytes: Vec) -> error::Result { +fn keypair_from_bytes(mut bytes: Vec) -> Result { secp256k1::SecretKey::try_from_bytes(&mut bytes) .map(|secret| { let keypair: secp256k1::Keypair = secret.into(); keypair.into() }) - .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) + .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e)) } /// Loads a private key from disk. If this fails, a new key is diff --git a/beacon_node/lighthouse_network/src/types/error.rs b/beacon_node/lighthouse_network/src/types/error.rs deleted file mode 100644 index a291e8fec5..0000000000 --- a/beacon_node/lighthouse_network/src/types/error.rs +++ /dev/null @@ -1,5 +0,0 @@ -// generates error types - -use error_chain::error_chain; - -error_chain! 
{} diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index bcebd02a0e..92583b7b5d 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -82,7 +82,7 @@ impl NetworkGlobals { peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)), gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), - backfill_state: RwLock::new(BackFillState::NotRequired), + backfill_state: RwLock::new(BackFillState::Paused), sampling_subnets, sampling_columns, config, diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 82558f6c97..6f266fd2ba 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -1,4 +1,3 @@ -pub mod error; mod globals; mod pubsub; mod subnet; diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index 4322763fc5..0519d6f4b0 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -35,8 +35,6 @@ pub enum BackFillState { Syncing, /// A backfill sync has completed. Completed, - /// A backfill sync is not required. - NotRequired, /// Too many failed attempts at backfilling. Consider it failed. 
Failed, } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 6d61bffe3d..44f6c54bbc 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,52 +5,51 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dev-dependencies] -sloggers = { workspace = true } +bls = { workspace = true } +eth2 = { workspace = true } +eth2_network_config = { workspace = true } genesis = { workspace = true } +gossipsub = { workspace = true } +kzg = { workspace = true } matches = "0.1.8" serde_json = { workspace = true } -slog-term = { workspace = true } slog-async = { workspace = true } -eth2 = { workspace = true } -gossipsub = { workspace = true } -eth2_network_config = { workspace = true } -kzg = { workspace = true } -bls = { workspace = true } +slog-term = { workspace = true } +sloggers = { workspace = true } [dependencies] alloy-primitives = { workspace = true } -async-channel = { workspace = true } -anyhow = { workspace = true } -beacon_chain = { workspace = true } -store = { workspace = true } -lighthouse_network = { workspace = true } -types = { workspace = true } -slot_clock = { workspace = true } -slog = { workspace = true } -hex = { workspace = true } -ethereum_ssz = { workspace = true } -ssz_types = { workspace = true } -futures = { workspace = true } -error-chain = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -smallvec = { workspace = true } -rand = { workspace = true } -fnv = { workspace = true } alloy-rlp = { workspace = true } -lighthouse_metrics = { workspace = true } -logging = { workspace = true } -task_executor = { workspace = true } +anyhow = { workspace = true } +async-channel = { workspace = true } +beacon_chain = { workspace = true } +beacon_processor = { workspace = true } +delay_map = { workspace = true } +derivative = { workspace = true } +ethereum_ssz = { workspace = true } +execution_layer = { workspace = true } +fnv = { workspace = true } +futures = 
{ workspace = true } +hex = { workspace = true } igd-next = "0.14" itertools = { workspace = true } +lighthouse_network = { workspace = true } +logging = { workspace = true } lru_cache = { workspace = true } -strum = { workspace = true } -derivative = { workspace = true } -delay_map = { workspace = true } +metrics = { workspace = true } operation_pool = { workspace = true } -execution_layer = { workspace = true } -beacon_processor = { workspace = true } parking_lot = { workspace = true } +rand = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +smallvec = { workspace = true } +ssz_types = { workspace = true } +store = { workspace = true } +strum = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +types = { workspace = true } [features] # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill diff --git a/beacon_node/network/src/error.rs b/beacon_node/network/src/error.rs deleted file mode 100644 index 1a964235e9..0000000000 --- a/beacon_node/network/src/error.rs +++ /dev/null @@ -1,8 +0,0 @@ -// generates error types -use error_chain::error_chain; - -error_chain! { - links { - Libp2p(lighthouse_network::error::Error, lighthouse_network::error::ErrorKind); - } -} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 13a2569b75..2a7fedb53e 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,5 +1,4 @@ /// This crate provides the network server for Lighthouse. 
-pub mod error; pub mod service; mod metrics; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 9e42aa8e92..154a59eade 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -2,21 +2,29 @@ use beacon_chain::{ attestation_verification::Error as AttnError, light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, - sync_committee_verification::Error as SyncCommitteeError, + sync_committee_verification::Error as SyncCommitteeError, AvailabilityProcessingStatus, + BlockError, }; use fnv::FnvHashMap; -pub use lighthouse_metrics::*; use lighthouse_network::{ peer_manager::peerdb::client::ClientKind, types::GossipKind, GossipTopic, Gossipsub, NetworkGlobals, }; +pub use metrics::*; use std::sync::{Arc, LazyLock}; +use strum::AsRefStr; use strum::IntoEnumIterator; use types::EthSpec; pub const SUCCESS: &str = "SUCCESS"; pub const FAILURE: &str = "FAILURE"; +#[derive(Debug, AsRefStr)] +pub(crate) enum BlockSource { + Gossip, + Rpc, +} + pub static BEACON_BLOCK_MESH_PEERS_PER_CLIENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( @@ -59,6 +67,27 @@ pub static SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: LazyLock> = ) }); +/* + * Beacon processor + */ +pub static BEACON_PROCESSOR_MISSING_COMPONENTS: LazyLock> = LazyLock::new( + || { + try_create_int_counter_vec( + "beacon_processor_missing_components_total", + "Total number of imported individual block components that resulted in missing components", + &["source", "component"], + ) + }, +); +pub static BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_processor_import_errors_total", + "Total number of block components that were not verified", + &["source", "component", "type"], + ) + }); + /* * Gossip processor */ @@ -606,6 +635,37 @@ pub fn 
register_sync_committee_error(error: &SyncCommitteeError) { inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); } +pub(crate) fn register_process_result_metrics( + result: &std::result::Result, + source: BlockSource, + block_component: &'static str, +) { + match result { + Ok(status) => match status { + AvailabilityProcessingStatus::Imported { .. } => match source { + BlockSource::Gossip => { + inc_counter(&BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); + } + BlockSource::Rpc => { + inc_counter(&BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); + } + }, + AvailabilityProcessingStatus::MissingComponents { .. } => { + inc_counter_vec( + &BEACON_PROCESSOR_MISSING_COMPONENTS, + &[source.as_ref(), block_component], + ); + } + }, + Err(error) => { + inc_counter_vec( + &BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE, + &[source.as_ref(), block_component, error.as_ref()], + ); + } + } +} + pub fn from_result(result: &std::result::Result) -> &str { match result { Ok(_) => SUCCESS, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 4d875cb4a1..f3c48e42f0 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1,5 +1,5 @@ use crate::{ - metrics, + metrics::{self, register_process_result_metrics}, network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}, service::NetworkMessage, sync::SyncMessage, @@ -710,8 +710,19 @@ impl NetworkBeaconProcessor { MessageAcceptance::Reject, ); } + GossipDataColumnError::PriorKnown { .. } => { + // Data column is available via either the EL or reconstruction. + // Do not penalise the peer. + // Gossip filter should filter any duplicates received after this. + debug!( + self.log, + "Received already available column sidecar. 
Ignoring the column sidecar"; + "slot" => %slot, + "block_root" => %block_root, + "index" => %index, + ) + } GossipDataColumnError::FutureSlot { .. } - | GossipDataColumnError::PriorKnown { .. } | GossipDataColumnError::PastFinalizedSlot { .. } => { debug!( self.log, @@ -852,7 +863,18 @@ impl NetworkBeaconProcessor { MessageAcceptance::Reject, ); } - GossipBlobError::FutureSlot { .. } | GossipBlobError::RepeatBlob { .. } => { + GossipBlobError::RepeatBlob { .. } => { + // We may have received the blob from the EL. Do not penalise the peer. + // Gossip filter should filter any duplicates received after this. + debug!( + self.log, + "Received already available blob sidecar. Ignoring the blob sidecar"; + "slot" => %slot, + "root" => %root, + "index" => %index, + ) + } + GossipBlobError::FutureSlot { .. } => { debug!( self.log, "Could not verify blob sidecar for gossip. Ignoring the blob sidecar"; @@ -914,18 +936,14 @@ impl NetworkBeaconProcessor { let blob_slot = verified_blob.slot(); let blob_index = verified_blob.id().index; - let result = self - .chain - .process_gossip_blob(verified_blob, || Ok(())) - .await; + let result = self.chain.process_gossip_blob(verified_blob).await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "blob"); match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - // Note: Reusing block imported metric here - metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); info!( self.log, - "Gossipsub blob processed, imported fully available block"; + "Gossipsub blob processed - imported fully available block"; "block_root" => %block_root ); self.chain.recompute_head_at_current_slot().await; @@ -936,9 +954,9 @@ impl NetworkBeaconProcessor { ); } Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { - trace!( + debug!( self.log, - "Processed blob, waiting for other components"; + "Processed gossip blob - waiting for other components"; "slot" => %slot, 
"blob_index" => %blob_index, "block_root" => %block_root, @@ -992,43 +1010,39 @@ impl NetworkBeaconProcessor { let data_column_slot = verified_data_column.slot(); let data_column_index = verified_data_column.id().index; - match self + let result = self .chain .process_gossip_data_columns(vec![verified_data_column], || Ok(())) - .await - { - Ok(availability) => { - match availability { - AvailabilityProcessingStatus::Imported(block_root) => { - // Note: Reusing block imported metric here - metrics::inc_counter( - &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL, - ); - info!( - self.log, - "Gossipsub data column processed, imported fully available block"; - "block_root" => %block_root - ); - self.chain.recompute_head_at_current_slot().await; + .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "data_column"); - metrics::set_gauge( - &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION, - processing_start_time.elapsed().as_millis() as i64, - ); - } - AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { - trace!( - self.log, - "Processed data column, waiting for other components"; - "slot" => %slot, - "data_column_index" => %data_column_index, - "block_root" => %block_root, - ); + match result { + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(block_root) => { + info!( + self.log, + "Gossipsub data column processed, imported fully available block"; + "block_root" => %block_root + ); + self.chain.recompute_head_at_current_slot().await; - self.attempt_data_column_reconstruction(block_root).await; - } + metrics::set_gauge( + &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION, + processing_start_time.elapsed().as_millis() as i64, + ); } - } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + trace!( + self.log, + "Processed data column, waiting for other components"; + "slot" => %slot, + "data_column_index" => %data_column_index, + "block_root" => %block_root, + ); + + 
self.attempt_data_column_reconstruction(block_root).await; + } + }, Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, @@ -1079,7 +1093,7 @@ impl NetworkBeaconProcessor { message_id, peer_id, peer_client, - block, + block.clone(), reprocess_tx.clone(), seen_duration, ) @@ -1447,6 +1461,20 @@ impl NetworkBeaconProcessor { } } + // Block is gossip valid. Attempt to fetch blobs from the EL using versioned hashes derived + // from kzg commitments, without having to wait for all blobs to be sent from the peers. + let publish_blobs = true; + let self_clone = self.clone(); + let block_clone = block.clone(); + self.executor.spawn( + async move { + self_clone + .fetch_engine_blobs_and_publish(block_clone, block_root, publish_blobs) + .await + }, + "fetch_blobs_gossip", + ); + let result = self .chain .process_block_with_early_caching( @@ -1456,11 +1484,10 @@ impl NetworkBeaconProcessor { NotifyExecutionLayer::Yes, ) .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "block"); match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); - if reprocess_tx .try_send(ReprocessQueueMessage::BlockImported { block_root: *block_root, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 76f5e886ff..d81d964e7c 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,11 +1,17 @@ use crate::sync::manager::BlockProcessType; use crate::sync::SamplingId; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; +use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_column_verification::{observe_gossip_data_column, GossipDataColumnError}; +use beacon_chain::fetch_blobs::{ + 
fetch_and_process_engine_blobs, BlobsOrDataColumns, FetchEngineBlobError, +}; +use beacon_chain::observed_data_sidecars::DoNotObserve; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, AvailabilityProcessingStatus, BeaconChain, + BeaconChainTypes, BlockError, NotifyExecutionLayer, }; -use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorChannels, BeaconProcessorSend, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, @@ -21,7 +27,8 @@ use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, }; -use slog::{debug, error, trace, Logger}; +use rand::prelude::SliceRandom; +use slog::{debug, error, trace, warn, Logger}; use slot_clock::ManualSlotClock; use std::path::PathBuf; use std::sync::Arc; @@ -67,6 +74,9 @@ pub struct NetworkBeaconProcessor { pub log: Logger, } +// Publish blobs in batches of exponentially increasing size. 
+const BLOB_PUBLICATION_EXP_FACTOR: usize = 2; + impl NetworkBeaconProcessor { fn try_send(&self, event: BeaconWorkEvent) -> Result<(), Error> { self.beacon_processor_send @@ -878,6 +888,79 @@ impl NetworkBeaconProcessor { }); } + pub async fn fetch_engine_blobs_and_publish( + self: &Arc, + block: Arc>>, + block_root: Hash256, + publish_blobs: bool, + ) { + let self_cloned = self.clone(); + let publish_fn = move |blobs_or_data_column| { + if publish_blobs { + match blobs_or_data_column { + BlobsOrDataColumns::Blobs(blobs) => { + self_cloned.publish_blobs_gradually(blobs, block_root); + } + BlobsOrDataColumns::DataColumns(columns) => { + self_cloned.publish_data_columns_gradually(columns, block_root); + } + }; + } + }; + + match fetch_and_process_engine_blobs( + self.chain.clone(), + block_root, + block.clone(), + publish_fn, + ) + .await + { + Ok(Some(availability)) => match availability { + AvailabilityProcessingStatus::Imported(_) => { + debug!( + self.log, + "Block components retrieved from EL"; + "result" => "imported block and custody columns", + "block_root" => %block_root, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Still missing blobs after engine blobs processed successfully"; + "block_root" => %block_root, + ); + } + }, + Ok(None) => { + debug!( + self.log, + "Fetch blobs completed without import"; + "block_root" => %block_root, + ); + } + Err(FetchEngineBlobError::BlobProcessingError(BlockError::DuplicateFullyImported( + .., + ))) => { + debug!( + self.log, + "Fetch blobs duplicate import"; + "block_root" => %block_root, + ); + } + Err(e) => { + error!( + self.log, + "Error fetching or processing blobs from EL"; + "error" => ?e, + "block_root" => %block_root, + ); + } + } + } + /// Attempt to reconstruct all data columns if the following conditions satisfies: /// - Our custody requirement is all columns /// - We have >= 50% of columns, but not all 
columns @@ -885,25 +968,13 @@ impl NetworkBeaconProcessor { /// Returns `Some(AvailabilityProcessingStatus)` if reconstruction is successfully performed, /// otherwise returns `None`. async fn attempt_data_column_reconstruction( - &self, + self: &Arc, block_root: Hash256, ) -> Option { let result = self.chain.reconstruct_data_columns(block_root).await; match result { Ok(Some((availability_processing_status, data_columns_to_publish))) => { - self.send_network_message(NetworkMessage::Publish { - messages: data_columns_to_publish - .iter() - .map(|d| { - let subnet = DataColumnSubnetId::from_column_index::( - d.index as usize, - &self.chain.spec, - ); - PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone()))) - }) - .collect(), - }); - + self.publish_data_columns_gradually(data_columns_to_publish, block_root); match &availability_processing_status { AvailabilityProcessingStatus::Imported(hash) => { debug!( @@ -946,6 +1017,175 @@ impl NetworkBeaconProcessor { } } } + + /// This function gradually publishes blobs to the network in randomised batches. + /// + /// This is an optimisation to reduce outbound bandwidth and ensures each blob is published + /// by some nodes on the network as soon as possible. Our hope is that some blobs arrive from + /// other nodes in the meantime, obviating the need for us to publish them. If no other + /// publisher exists for a blob, it will eventually get published here. + fn publish_blobs_gradually( + self: &Arc, + mut blobs: Vec>, + block_root: Hash256, + ) { + let self_clone = self.clone(); + + self.executor.spawn( + async move { + let chain = self_clone.chain.clone(); + let log = self_clone.chain.logger(); + let publish_fn = |blobs: Vec>>| { + self_clone.send_network_message(NetworkMessage::Publish { + messages: blobs + .into_iter() + .map(|blob| PubsubMessage::BlobSidecar(Box::new((blob.index, blob)))) + .collect(), + }); + }; + + // Permute the blobs and split them into batches. 
+ // The hope is that we won't need to publish some blobs because we will receive them + // on gossip from other nodes. + blobs.shuffle(&mut rand::thread_rng()); + + let blob_publication_batch_interval = chain.config.blob_publication_batch_interval; + let mut publish_count = 0usize; + let blob_count = blobs.len(); + let mut blobs_iter = blobs.into_iter().peekable(); + let mut batch_size = 1usize; + + while blobs_iter.peek().is_some() { + let batch = blobs_iter.by_ref().take(batch_size); + let publishable = batch + .filter_map(|unobserved| match unobserved.observe(&chain) { + Ok(observed) => Some(observed.clone_blob()), + Err(GossipBlobError::RepeatBlob { .. }) => None, + Err(e) => { + warn!( + log, + "Previously verified blob is invalid"; + "error" => ?e + ); + None + } + }) + .collect::>(); + + if !publishable.is_empty() { + debug!( + log, + "Publishing blob batch"; + "publish_count" => publishable.len(), + "block_root" => ?block_root, + ); + publish_count += publishable.len(); + publish_fn(publishable); + } + + tokio::time::sleep(blob_publication_batch_interval).await; + batch_size *= BLOB_PUBLICATION_EXP_FACTOR; + } + + debug!( + log, + "Batch blob publication complete"; + "batch_interval" => blob_publication_batch_interval.as_millis(), + "blob_count" => blob_count, + "published_count" => publish_count, + "block_root" => ?block_root, + ) + }, + "gradual_blob_publication", + ); + } + + /// This function gradually publishes data columns to the network in randomised batches. + /// + /// This is an optimisation to reduce outbound bandwidth and ensures each column is published + /// by some nodes on the network as soon as possible. Our hope is that some columns arrive from + /// other supernodes in the meantime, obviating the need for us to publish them. If no other + /// publisher exists for a column, it will eventually get published here. 
+ fn publish_data_columns_gradually( + self: &Arc, + mut data_columns_to_publish: DataColumnSidecarList, + block_root: Hash256, + ) { + let self_clone = self.clone(); + + self.executor.spawn( + async move { + let chain = self_clone.chain.clone(); + let log = self_clone.chain.logger(); + let publish_fn = |columns: DataColumnSidecarList| { + self_clone.send_network_message(NetworkMessage::Publish { + messages: columns + .into_iter() + .map(|d| { + let subnet = DataColumnSubnetId::from_column_index::( + d.index as usize, + &chain.spec, + ); + PubsubMessage::DataColumnSidecar(Box::new((subnet, d))) + }) + .collect(), + }); + }; + + // If this node is a super node, permute the columns and split them into batches. + // The hope is that we won't need to publish some columns because we will receive them + // on gossip from other supernodes. + data_columns_to_publish.shuffle(&mut rand::thread_rng()); + + let blob_publication_batch_interval = chain.config.blob_publication_batch_interval; + let blob_publication_batches = chain.config.blob_publication_batches; + let batch_size = chain.spec.number_of_columns / blob_publication_batches; + let mut publish_count = 0usize; + + for batch in data_columns_to_publish.chunks(batch_size) { + let publishable = batch + .iter() + .filter_map(|col| match observe_gossip_data_column(col, &chain) { + Ok(()) => Some(col.clone()), + Err(GossipDataColumnError::PriorKnown { .. 
}) => None, + Err(e) => { + warn!( + log, + "Previously verified data column is invalid"; + "error" => ?e + ); + None + } + }) + .collect::>(); + + if !publishable.is_empty() { + debug!( + log, + "Publishing data column batch"; + "publish_count" => publishable.len(), + "block_root" => ?block_root, + ); + publish_count += publishable.len(); + publish_fn(publishable); + } + + tokio::time::sleep(blob_publication_batch_interval).await; + } + + debug!( + log, + "Batch data column publishing complete"; + "batch_size" => batch_size, + "batch_interval" => blob_publication_batch_interval.as_millis(), + "data_columns_to_publish_count" => data_columns_to_publish.len(), + "published_count" => publish_count, + "block_root" => ?block_root, + ) + }, + "gradual_data_column_publication", + ); + } } type TestBeaconChainType = diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 6d32806713..c4944078fe 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -2,7 +2,7 @@ use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERA use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; -use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; +use beacon_chain::{BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use itertools::process_results; use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ @@ -682,12 +682,10 @@ impl NetworkBeaconProcessor { .forwards_iter_block_roots(Slot::from(*req.start_slot())) { Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot, - oldest_block_slot, - }, - )) => { + Err(BeaconChainError::HistoricalBlockOutOfRange { + slot, + oldest_block_slot, + }) => { debug!(self.log, 
"Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot @@ -941,12 +939,10 @@ impl NetworkBeaconProcessor { let forwards_block_root_iter = match self.chain.forwards_iter_block_roots(request_start_slot) { Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot, - oldest_block_slot, - }, - )) => { + Err(BeaconChainError::HistoricalBlockOutOfRange { + slot, + oldest_block_slot, + }) => { debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot @@ -1147,12 +1143,10 @@ impl NetworkBeaconProcessor { let forwards_block_root_iter = match self.chain.forwards_iter_block_roots(request_start_slot) { Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockError( - HistoricalBlockError::BlockOutOfRange { - slot, - oldest_block_slot, - }, - )) => { + Err(BeaconChainError::HistoricalBlockOutOfRange { + slot, + oldest_block_slot, + }) => { debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 82d06c20f8..817e6b6440 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -1,4 +1,4 @@ -use crate::metrics; +use crate::metrics::{self, register_process_result_metrics}; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; use crate::sync::BatchProcessResult; use crate::sync::{ @@ -10,8 +10,8 @@ use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::data_availability_checker::MaybeAvailableBlock; use beacon_chain::data_column_verification::verify_kzg_for_data_column_list; use beacon_chain::{ - validator_monitor::get_slot_delay_ms, 
AvailabilityProcessingStatus, BeaconChainError, - BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, + validator_monitor::get_slot_delay_ms, AvailabilityProcessingStatus, BeaconChainTypes, + BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; use beacon_processor::{ work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage}, @@ -153,6 +153,7 @@ impl NetworkBeaconProcessor { "process_type" => ?process_type, ); + let signed_beacon_block = block.block_cloned(); let result = self .chain .process_block_with_early_caching( @@ -162,30 +163,40 @@ impl NetworkBeaconProcessor { NotifyExecutionLayer::Yes, ) .await; - - metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "block"); // RPC block imported, regardless of process type - if let &Ok(AvailabilityProcessingStatus::Imported(hash)) = &result { - info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); + match result.as_ref() { + Ok(AvailabilityProcessingStatus::Imported(hash)) => { + info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); - // Trigger processing for work referencing this block. - let reprocess_msg = ReprocessQueueMessage::BlockImported { - block_root: hash, - parent_root, - }; - if reprocess_tx.try_send(reprocess_msg).is_err() { - error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %hash) - }; - self.chain.block_times_cache.write().set_time_observed( - hash, - slot, - seen_timestamp, - None, - None, - ); + // Trigger processing for work referencing this block. 
+ let reprocess_msg = ReprocessQueueMessage::BlockImported { + block_root: *hash, + parent_root, + }; + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %hash) + }; + self.chain.block_times_cache.write().set_time_observed( + *hash, + slot, + seen_timestamp, + None, + None, + ); - self.chain.recompute_head_at_current_slot().await; + self.chain.recompute_head_at_current_slot().await; + } + Ok(AvailabilityProcessingStatus::MissingComponents(..)) => { + // Block is valid, we can now attempt fetching blobs from EL using version hashes + // derived from kzg commitments from the block, without having to wait for all blobs + // to be sent from the peers if we already have them. + let publish_blobs = false; + self.fetch_engine_blobs_and_publish(signed_beacon_block, block_root, publish_blobs) + .await + } + _ => {} } // RPC block imported or execution validated. If the block was already imported by gossip we @@ -274,6 +285,7 @@ impl NetworkBeaconProcessor { } let result = self.chain.process_rpc_blobs(slot, block_root, blobs).await; + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "blobs"); match &result { Ok(AvailabilityProcessingStatus::Imported(hash)) => { @@ -331,6 +343,7 @@ impl NetworkBeaconProcessor { .chain .process_rpc_custody_columns(custody_columns) .await; + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "custody_columns"); match &result { Ok(availability) => match availability { @@ -606,103 +619,69 @@ impl NetworkBeaconProcessor { ); (imported_blocks, Ok(())) } - Err(error) => { + Err(e) => { metrics::inc_counter( &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_FAILED_TOTAL, ); - let err = match error { - // Handle the historical block errors specifically - BeaconChainError::HistoricalBlockError(e) => match e { - HistoricalBlockError::MismatchedBlockRoot { - block_root, - expected_block_root, - } => { - debug!( - self.log, - 
"Backfill batch processing error"; - "error" => "mismatched_block_root", - "block_root" => ?block_root, - "expected_root" => ?expected_block_root - ); - - ChainSegmentFailed { - message: String::from("mismatched_block_root"), - // The peer is faulty if they send blocks with bad roots. - peer_action: Some(PeerAction::LowToleranceError), - } - } - HistoricalBlockError::InvalidSignature - | HistoricalBlockError::SignatureSet(_) => { - warn!( - self.log, - "Backfill batch processing error"; - "error" => ?e - ); - - ChainSegmentFailed { - message: "invalid_signature".into(), - // The peer is faulty if they bad signatures. - peer_action: Some(PeerAction::LowToleranceError), - } - } - HistoricalBlockError::ValidatorPubkeyCacheTimeout => { - warn!( - self.log, - "Backfill batch processing error"; - "error" => "pubkey_cache_timeout" - ); - - ChainSegmentFailed { - message: "pubkey_cache_timeout".into(), - // This is an internal error, do not penalize the peer. - peer_action: None, - } - } - HistoricalBlockError::NoAnchorInfo => { - warn!(self.log, "Backfill not required"); - - ChainSegmentFailed { - message: String::from("no_anchor_info"), - // There is no need to do a historical sync, this is not a fault of - // the peer. - peer_action: None, - } - } - HistoricalBlockError::IndexOutOfBounds => { - error!( - self.log, - "Backfill batch OOB error"; - "error" => ?e, - ); - ChainSegmentFailed { - message: String::from("logic_error"), - // This should never occur, don't penalize the peer. - peer_action: None, - } - } - HistoricalBlockError::BlockOutOfRange { .. } => { - error!( - self.log, - "Backfill batch error"; - "error" => ?e, - ); - ChainSegmentFailed { - message: String::from("unexpected_error"), - // This should never occur, don't penalize the peer. 
- peer_action: None, - } - } - }, - other => { - warn!(self.log, "Backfill batch processing error"; "error" => ?other); - ChainSegmentFailed { - message: format!("{:?}", other), - // This is an internal error, don't penalize the peer. - peer_action: None, - } + let peer_action = match &e { + HistoricalBlockError::MismatchedBlockRoot { + block_root, + expected_block_root, + } => { + debug!( + self.log, + "Backfill batch processing error"; + "error" => "mismatched_block_root", + "block_root" => ?block_root, + "expected_root" => ?expected_block_root + ); + // The peer is faulty if they send blocks with bad roots. + Some(PeerAction::LowToleranceError) } + HistoricalBlockError::InvalidSignature + | HistoricalBlockError::SignatureSet(_) => { + warn!( + self.log, + "Backfill batch processing error"; + "error" => ?e + ); + // The peer is faulty if they bad signatures. + Some(PeerAction::LowToleranceError) + } + HistoricalBlockError::ValidatorPubkeyCacheTimeout => { + warn!( + self.log, + "Backfill batch processing error"; + "error" => "pubkey_cache_timeout" + ); + // This is an internal error, do not penalize the peer. + None + } + HistoricalBlockError::IndexOutOfBounds => { + error!( + self.log, + "Backfill batch OOB error"; + "error" => ?e, + ); + // This should never occur, don't penalize the peer. + None + } + HistoricalBlockError::StoreError(e) => { + warn!(self.log, "Backfill batch processing error"; "error" => ?e); + // This is an internal error, don't penalize the peer. + None + } // + // Do not use a fallback match, handle all errors explicitly }; - (0, Err(err)) + let err_str: &'static str = e.into(); + ( + 0, + Err(ChainSegmentFailed { + message: format!("{:?}", err_str), + // This is an internal error, don't penalize the peer. 
+ peer_action, + }), + ) } } } diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index f9b6b06526..8238fa146d 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -527,7 +527,7 @@ impl TestRig { self.assert_event_journal( &expected .iter() - .map(|ev| Into::<&'static str>::into(ev)) + .map(Into::<&'static str>::into) .chain(std::iter::once(WORKER_FREED)) .chain(std::iter::once(NOTHING_TO_DO)) .collect::>(), diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index e1badfda9d..0a99b6af0c 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -5,7 +5,6 @@ //! syncing-related responses to the Sync manager. #![allow(clippy::unit_arg)] -use crate::error; use crate::network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::status_message; @@ -92,7 +91,7 @@ impl Router { beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, log: slog::Logger, - ) -> error::Result>> { + ) -> Result>, String> { let message_handler_log = log.new(o!("service"=> "router")); trace!(message_handler_log, "Service starting"); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 5a66cb7f30..7826807e03 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,13 +1,10 @@ +use crate::metrics; use crate::nat; use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; -use crate::subnet_service::SyncCommitteeService; -use crate::{error, metrics}; -use crate::{ - subnet_service::{AttestationService, SubnetServiceMessage}, - NetworkConfig, -}; +use crate::subnet_service::{SubnetService, 
SubnetServiceMessage, Subscription}; +use crate::NetworkConfig; use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend}; use futures::channel::mpsc::Sender; @@ -165,10 +162,8 @@ pub struct NetworkService { beacon_chain: Arc>, /// The underlying libp2p service that drives all the network interactions. libp2p: Network, - /// An attestation and subnet manager service. - attestation_service: AttestationService, - /// A sync committeee subnet manager service. - sync_committee_service: SyncCommitteeService, + /// An attestation and sync committee subnet manager service. + subnet_service: SubnetService, /// The receiver channel for lighthouse to communicate with the network service. network_recv: mpsc::UnboundedReceiver>, /// The receiver channel for lighthouse to send validator subscription requests. @@ -213,11 +208,14 @@ impl NetworkService { libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, - ) -> error::Result<( - NetworkService, - Arc>, - NetworkSenders, - )> { + ) -> Result< + ( + NetworkService, + Arc>, + NetworkSenders, + ), + String, + > { let network_log = executor.log().clone(); // build the channels for external comms let (network_senders, network_receivers) = NetworkSenders::new(); @@ -317,16 +315,13 @@ impl NetworkService { network_log.clone(), )?; - // attestation subnet service - let attestation_service = AttestationService::new( + // attestation and sync committee subnet service + let subnet_service = SubnetService::new( beacon_chain.clone(), network_globals.local_enr().node_id(), &config, &network_log, ); - // sync committee subnet service - let sync_committee_service = - SyncCommitteeService::new(beacon_chain.clone(), &config, &network_log); // create a timer for updating network metrics let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL)); @@ -344,8 
+339,7 @@ impl NetworkService { let network_service = NetworkService { beacon_chain, libp2p, - attestation_service, - sync_committee_service, + subnet_service, network_recv, validator_subscription_recv, router_send, @@ -376,7 +370,7 @@ impl NetworkService { libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, - ) -> error::Result<(Arc>, NetworkSenders)> { + ) -> Result<(Arc>, NetworkSenders), String> { let (network_service, network_globals, network_senders) = Self::build( beacon_chain, config, @@ -460,11 +454,8 @@ impl NetworkService { // handle a message from a validator requesting a subscription to a subnet Some(msg) = self.validator_subscription_recv.recv() => self.on_validator_subscription_msg(msg).await, - // process any attestation service events - Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg), - - // process any sync committee service events - Some(msg) = self.sync_committee_service.next() => self.on_sync_committee_service_message(msg), + // process any subnet service events + Some(msg) = self.subnet_service.next() => self.on_subnet_service_msg(msg), event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, @@ -552,13 +543,14 @@ impl NetworkService { match message { // attestation information gets processed in the attestation service PubsubMessage::Attestation(ref subnet_and_attestation) => { - let subnet = subnet_and_attestation.0; + let subnet_id = subnet_and_attestation.0; let attestation = &subnet_and_attestation.1; // checks if we have an aggregator for the slot. If so, we should process // the attestation, else we just just propagate the Attestation. 
- let should_process = self - .attestation_service - .should_process_attestation(subnet, attestation); + let should_process = self.subnet_service.should_process_attestation( + Subnet::Attestation(subnet_id), + attestation, + ); self.send_to_router(RouterMessage::PubsubMessage( id, source, @@ -832,20 +824,12 @@ impl NetworkService { async fn on_validator_subscription_msg(&mut self, msg: ValidatorSubscriptionMessage) { match msg { ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions } => { - if let Err(e) = self - .attestation_service - .validator_subscriptions(subscriptions.into_iter()) - { - warn!(self.log, "Attestation validator subscription failed"; "error" => e); - } + let subscriptions = subscriptions.into_iter().map(Subscription::Attestation); + self.subnet_service.validator_subscriptions(subscriptions) } ValidatorSubscriptionMessage::SyncCommitteeSubscribe { subscriptions } => { - if let Err(e) = self - .sync_committee_service - .validator_subscriptions(subscriptions) - { - warn!(self.log, "Sync committee calidator subscription failed"; "error" => e); - } + let subscriptions = subscriptions.into_iter().map(Subscription::SyncCommittee); + self.subnet_service.validator_subscriptions(subscriptions) } } } @@ -881,7 +865,7 @@ impl NetworkService { } } - fn on_attestation_service_msg(&mut self, msg: SubnetServiceMessage) { + fn on_subnet_service_msg(&mut self, msg: SubnetServiceMessage) { match msg { SubnetServiceMessage::Subscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { @@ -900,36 +884,9 @@ impl NetworkService { SubnetServiceMessage::EnrAdd(subnet) => { self.libp2p.update_enr_subnet(subnet, true); } - SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p.update_enr_subnet(subnet, false); - } - SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - self.libp2p.discover_subnet_peers(subnets_to_discover); - } - } - } - - fn on_sync_committee_service_message(&mut self, msg: SubnetServiceMessage) { - match msg { 
- SubnetServiceMessage::Subscribe(subnet) => { - for fork_digest in self.required_gossip_fork_digests() { - let topic = - GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.subscribe(topic); - } - } - SubnetServiceMessage::Unsubscribe(subnet) => { - for fork_digest in self.required_gossip_fork_digests() { - let topic = - GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.unsubscribe(topic); - } - } - SubnetServiceMessage::EnrAdd(subnet) => { - self.libp2p.update_enr_subnet(subnet, true); - } - SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p.update_enr_subnet(subnet, false); + SubnetServiceMessage::EnrRemove(sync_subnet_id) => { + self.libp2p + .update_enr_subnet(Subnet::SyncCommittee(sync_subnet_id), false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { self.libp2p.discover_subnet_peers(subnets_to_discover); diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index b55992c624..32bbfcbcaa 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -1,238 +1,229 @@ -#[cfg(not(debug_assertions))] -#[cfg(test)] -mod tests { - use crate::persisted_dht::load_dht; - use crate::{NetworkConfig, NetworkService}; - use beacon_chain::test_utils::BeaconChainHarness; - use beacon_chain::BeaconChainTypes; - use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; - use futures::StreamExt; - use lighthouse_network::types::{GossipEncoding, GossipKind}; - use lighthouse_network::{Enr, GossipTopic}; - use slog::{o, Drain, Level, Logger}; - use sloggers::{null::NullLoggerBuilder, Build}; - use std::str::FromStr; - use std::sync::Arc; - use tokio::runtime::Runtime; - use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; +#![cfg(not(debug_assertions))] +#![cfg(test)] +use crate::persisted_dht::load_dht; +use crate::{NetworkConfig, NetworkService}; +use 
beacon_chain::test_utils::BeaconChainHarness; +use beacon_chain::BeaconChainTypes; +use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; +use futures::StreamExt; +use lighthouse_network::types::{GossipEncoding, GossipKind}; +use lighthouse_network::{Enr, GossipTopic}; +use slog::{o, Drain, Level, Logger}; +use sloggers::{null::NullLoggerBuilder, Build}; +use std::str::FromStr; +use std::sync::Arc; +use tokio::runtime::Runtime; +use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; - impl NetworkService { - fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> { - self.libp2p.get_topic_params(topic) - } +impl NetworkService { + fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> { + self.libp2p.get_topic_params(topic) } +} - fn get_logger(actual_log: bool) -> Logger { - if actual_log { - let drain = { - let decorator = slog_term::TermDecorator::new().build(); - let decorator = - logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).chan_size(2048).build(); - drain.filter_level(Level::Debug) - }; +fn get_logger(actual_log: bool) -> Logger { + if actual_log { + let drain = { + let decorator = slog_term::TermDecorator::new().build(); + let decorator = + logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).chan_size(2048).build(); + drain.filter_level(Level::Debug) + }; - Logger::root(drain.fuse(), o!()) - } else { - let builder = NullLoggerBuilder; - builder.build().expect("should build logger") - } + Logger::root(drain.fuse(), o!()) + } else { + let builder = NullLoggerBuilder; + builder.build().expect("should build logger") } +} - #[test] - fn test_dht_persistence() { - let log = get_logger(false); +#[test] 
+fn test_dht_persistence() { + let log = get_logger(false); - let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .deterministic_keypairs(8) - .fresh_ephemeral_store() - .build() - .chain; + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .default_spec() + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .build() + .chain; - let store = beacon_chain.store.clone(); + let store = beacon_chain.store.clone(); - let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap(); - let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); - let enrs = vec![enr1, enr2]; + let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap(); + let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); + let enrs = vec![enr1, enr2]; - let runtime = Arc::new(Runtime::new().unwrap()); + let runtime = Arc::new(Runtime::new().unwrap()); - let (signal, exit) = async_channel::bounded(1); + let (signal, exit) = async_channel::bounded(1); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = + task_executor::TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + + let mut config = NetworkConfig::default(); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); + config.discv5_config.table_filter = |_| true; // Do not ignore 
local IPs + config.upnp_enabled = false; + config.boot_nodes_enr = enrs.clone(); + let config = Arc::new(config); + runtime.block_on(async move { + // Create a new network service which implicitly gets dropped at the + // end of the block. + + let BeaconProcessorChannels { + beacon_processor_tx, + beacon_processor_rx: _beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx: _work_reprocessing_rx, + } = <_>::default(); + + let _network_service = NetworkService::start( + beacon_chain.clone(), + config, + executor, + None, + beacon_processor_tx, + work_reprocessing_tx, + ) + .await + .unwrap(); + drop(signal); + }); + + let raw_runtime = Arc::try_unwrap(runtime).unwrap(); + raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300)); + + // Load the persisted dht from the store + let persisted_enrs = load_dht(store); + assert!( + persisted_enrs.contains(&enrs[0]), + "should have persisted the first ENR to store" + ); + assert!( + persisted_enrs.contains(&enrs[1]), + "should have persisted the second ENR to store" + ); +} + +// Test removing topic weight on old topics when a fork happens. +#[test] +fn test_removing_topic_weight_on_old_topics() { + let runtime = Arc::new(Runtime::new().unwrap()); + + // Capella spec + let mut spec = MinimalEthSpec::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(1)); + + // Build beacon chain. + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec.clone().into()) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build() + .chain; + let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); + assert_eq!(next_fork_name, ForkName::Capella); + + // Build network service. 
+ let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { + let (_, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new( Arc::downgrade(&runtime), exit, - log.clone(), + get_logger(false), shutdown_tx, ); let mut config = NetworkConfig::default(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); config.discv5_config.table_filter = |_| true; // Do not ignore local IPs config.upnp_enabled = false; - config.boot_nodes_enr = enrs.clone(); let config = Arc::new(config); - runtime.block_on(async move { - // Create a new network service which implicitly gets dropped at the - // end of the block. - let BeaconProcessorChannels { - beacon_processor_tx, - beacon_processor_rx: _beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx: _work_reprocessing_rx, - } = <_>::default(); + let beacon_processor_channels = + BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); + NetworkService::build( + beacon_chain.clone(), + config, + executor.clone(), + None, + beacon_processor_channels.beacon_processor_tx, + beacon_processor_channels.work_reprocessing_tx, + ) + .await + .unwrap() + }); - let _network_service = NetworkService::start( - beacon_chain.clone(), - config, - executor, - None, - beacon_processor_tx, - work_reprocessing_tx, - ) - .await - .unwrap(); - drop(signal); - }); - - let raw_runtime = Arc::try_unwrap(runtime).unwrap(); - raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300)); - - // Load the persisted dht from the store - let persisted_enrs = load_dht(store); - assert!( - persisted_enrs.contains(&enrs[0]), - "should have persisted the first ENR to store" - ); - assert!( - persisted_enrs.contains(&enrs[1]), - "should have persisted the second ENR to store" - ); - } - - // Test 
removing topic weight on old topics when a fork happens. - #[test] - fn test_removing_topic_weight_on_old_topics() { - let runtime = Arc::new(Runtime::new().unwrap()); - - // Capella spec - let mut spec = MinimalEthSpec::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.capella_fork_epoch = Some(Epoch::new(1)); - - // Build beacon chain. - let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec.clone().into()) - .deterministic_keypairs(8) - .fresh_ephemeral_store() - .mock_execution_layer() - .build() - .chain; - let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); - assert_eq!(next_fork_name, ForkName::Capella); - - // Build network service. - let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { - let (_, exit) = async_channel::bounded(1); - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = task_executor::TaskExecutor::new( - Arc::downgrade(&runtime), - exit, - get_logger(false), - shutdown_tx, - ); - - let mut config = NetworkConfig::default(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); - config.discv5_config.table_filter = |_| true; // Do not ignore local IPs - config.upnp_enabled = false; - let config = Arc::new(config); - - let beacon_processor_channels = - BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); - NetworkService::build( - beacon_chain.clone(), - config, - executor.clone(), - None, - beacon_processor_channels.beacon_processor_tx, - beacon_processor_channels.work_reprocessing_tx, - ) - .await - .unwrap() - }); - - // Subscribe to the topics. - runtime.block_on(async { - while network_globals.gossipsub_subscriptions.read().len() < 2 { - if let Some(msg) = network_service.attestation_service.next().await { - network_service.on_attestation_service_msg(msg); - } + // Subscribe to the topics. 
+ runtime.block_on(async { + while network_globals.gossipsub_subscriptions.read().len() < 2 { + if let Some(msg) = network_service.subnet_service.next().await { + network_service.on_subnet_service_msg(msg); } - }); - - // Make sure the service is subscribed to the topics. - let (old_topic1, old_topic2) = { - let mut subnets = SubnetId::compute_subnets_for_epoch::( - network_globals.local_enr().node_id().raw(), - beacon_chain.epoch().unwrap(), - &spec, - ) - .unwrap() - .0 - .collect::>(); - assert_eq!(2, subnets.len()); - - let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; - let old_topic1 = GossipTopic::new( - GossipKind::Attestation(subnets.pop().unwrap()), - GossipEncoding::SSZSnappy, - old_fork_digest, - ); - let old_topic2 = GossipTopic::new( - GossipKind::Attestation(subnets.pop().unwrap()), - GossipEncoding::SSZSnappy, - old_fork_digest, - ); - - (old_topic1, old_topic2) - }; - let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); - assert_eq!(2, subscriptions.len()); - assert!(subscriptions.contains(&old_topic1)); - assert!(subscriptions.contains(&old_topic2)); - let old_topic_params1 = network_service - .get_topic_params(old_topic1.clone()) - .expect("topic score params"); - assert!(old_topic_params1.topic_weight > 0.0); - let old_topic_params2 = network_service - .get_topic_params(old_topic2.clone()) - .expect("topic score params"); - assert!(old_topic_params2.topic_weight > 0.0); - - // Advance slot to the next fork - for _ in 0..MinimalEthSpec::slots_per_epoch() { - beacon_chain.slot_clock.advance_slot(); } + }); - // Run `NetworkService::update_next_fork()`. - runtime.block_on(async { - network_service.update_next_fork(); - }); + // Make sure the service is subscribed to the topics. 
+ let (old_topic1, old_topic2) = { + let mut subnets = SubnetId::compute_attestation_subnets( + network_globals.local_enr().node_id().raw(), + &spec, + ) + .collect::>(); + assert_eq!(2, subnets.len()); - // Check that topic_weight on the old topics has been zeroed. - let old_topic_params1 = network_service - .get_topic_params(old_topic1) - .expect("topic score params"); - assert_eq!(0.0, old_topic_params1.topic_weight); + let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; + let old_topic1 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + let old_topic2 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); - let old_topic_params2 = network_service - .get_topic_params(old_topic2) - .expect("topic score params"); - assert_eq!(0.0, old_topic_params2.topic_weight); + (old_topic1, old_topic2) + }; + let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); + assert_eq!(2, subscriptions.len()); + assert!(subscriptions.contains(&old_topic1)); + assert!(subscriptions.contains(&old_topic2)); + let old_topic_params1 = network_service + .get_topic_params(old_topic1.clone()) + .expect("topic score params"); + assert!(old_topic_params1.topic_weight > 0.0); + let old_topic_params2 = network_service + .get_topic_params(old_topic2.clone()) + .expect("topic score params"); + assert!(old_topic_params2.topic_weight > 0.0); + + // Advance slot to the next fork + for _ in 0..MinimalEthSpec::slots_per_epoch() { + beacon_chain.slot_clock.advance_slot(); } + + // Run `NetworkService::update_next_fork()`. + runtime.block_on(async { + network_service.update_next_fork(); + }); + + // Check that topic_weight on the old topics has been zeroed. 
+ let old_topic_params1 = network_service + .get_topic_params(old_topic1) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params1.topic_weight); + + let old_topic_params2 = network_service + .get_topic_params(old_topic2) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params2.topic_weight); } diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs deleted file mode 100644 index 432a2b7fb7..0000000000 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ /dev/null @@ -1,687 +0,0 @@ -//! This service keeps track of which shard subnet the beacon node should be subscribed to at any -//! given time. It schedules subscriptions to shard subnets, requests peer discoveries and -//! determines whether attestations should be aggregated and/or passed to the beacon node. - -use super::SubnetServiceMessage; -use std::collections::HashSet; -use std::collections::{HashMap, VecDeque}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Duration; - -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use delay_map::{HashMapDelay, HashSetDelay}; -use futures::prelude::*; -use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDiscovery}; -use slog::{debug, error, info, o, trace, warn}; -use slot_clock::SlotClock; -use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; - -use crate::metrics; - -/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the -/// slot is less than this number, skip the peer discovery process. -/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. -pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; -/// The fraction of a slot that we subscribe to a subnet before the required slot. -/// -/// Currently a whole slot ahead. 
-const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; - -/// The number of slots after an aggregator duty where we remove the entry from -/// `aggregate_validators_on_subnet` delay map. -const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2; - -#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] -pub(crate) enum SubscriptionKind { - /// Long lived subscriptions. - /// - /// These have a longer duration and are advertised in our ENR. - LongLived, - /// Short lived subscriptions. - /// - /// Subscribing to these subnets has a short duration and we don't advertise it in our ENR. - ShortLived, -} - -/// A particular subnet at a given slot. -#[derive(PartialEq, Eq, Hash, Clone, Debug, Copy)] -pub struct ExactSubnet { - /// The `SubnetId` associated with this subnet. - pub subnet_id: SubnetId, - /// The `Slot` associated with this subnet. - pub slot: Slot, -} - -pub struct AttestationService { - /// Queued events to return to the driving service. - events: VecDeque, - - /// A reference to the beacon chain to process received attestations. - pub(crate) beacon_chain: Arc>, - - /// Subnets we are currently subscribed to as short lived subscriptions. - /// - /// Once they expire, we unsubscribe from these. - /// We subscribe to subnets when we are an aggregator for an exact subnet. - short_lived_subscriptions: HashMapDelay, - - /// Subnets we are currently subscribed to as long lived subscriptions. - /// - /// We advertise these in our ENR. When these expire, the subnet is removed from our ENR. - /// These are required of all beacon nodes. The exact number is determined by the chain - /// specification. - long_lived_subscriptions: HashSet, - - /// Short lived subscriptions that need to be executed in the future. - scheduled_short_lived_subscriptions: HashSetDelay, - - /// A collection timeouts to track the existence of aggregate validator subscriptions at an - /// `ExactSubnet`. - aggregate_validators_on_subnet: Option>, - - /// The waker for the current thread. 
- waker: Option, - - /// The discovery mechanism of lighthouse is disabled. - discovery_disabled: bool, - - /// We are always subscribed to all subnets. - subscribe_all_subnets: bool, - - /// Our Discv5 node_id. - node_id: NodeId, - - /// Future used to manage subscribing and unsubscribing from long lived subnets. - next_long_lived_subscription_event: Pin>, - - /// Whether this node is a block proposer-only node. - proposer_only: bool, - - /// The logger for the attestation service. - log: slog::Logger, -} - -impl AttestationService { - /* Public functions */ - - /// Establish the service based on the passed configuration. - pub fn new( - beacon_chain: Arc>, - node_id: NodeId, - config: &NetworkConfig, - log: &slog::Logger, - ) -> Self { - let log = log.new(o!("service" => "attestation_service")); - - let slot_duration = beacon_chain.slot_clock.slot_duration(); - - if config.subscribe_all_subnets { - slog::info!(log, "Subscribing to all subnets"); - } else { - slog::info!(log, "Deterministic long lived subnets enabled"; "subnets_per_node" => beacon_chain.spec.subnets_per_node, "subscription_duration_in_epochs" => beacon_chain.spec.epochs_per_subnet_subscription); - } - - let track_validators = !config.import_all_attestations; - let aggregate_validators_on_subnet = - track_validators.then(|| HashSetDelay::new(slot_duration)); - let mut service = AttestationService { - events: VecDeque::with_capacity(10), - beacon_chain, - short_lived_subscriptions: HashMapDelay::new(slot_duration), - long_lived_subscriptions: HashSet::default(), - scheduled_short_lived_subscriptions: HashSetDelay::default(), - aggregate_validators_on_subnet, - waker: None, - discovery_disabled: config.disable_discovery, - subscribe_all_subnets: config.subscribe_all_subnets, - node_id, - next_long_lived_subscription_event: { - // Set a dummy sleep. 
Calculating the current subnet subscriptions will update this - // value with a smarter timing - Box::pin(tokio::time::sleep(Duration::from_secs(1))) - }, - proposer_only: config.proposer_only, - log, - }; - - // If we are not subscribed to all subnets, handle the deterministic set of subnets - if !config.subscribe_all_subnets { - service.recompute_long_lived_subnets(); - } - - service - } - - /// Return count of all currently subscribed subnets (long-lived **and** short-lived). - #[cfg(test)] - pub fn subscription_count(&self) -> usize { - if self.subscribe_all_subnets { - self.beacon_chain.spec.attestation_subnet_count as usize - } else { - let count = self - .short_lived_subscriptions - .keys() - .chain(self.long_lived_subscriptions.iter()) - .collect::>() - .len(); - count - } - } - - /// Returns whether we are subscribed to a subnet for testing purposes. - #[cfg(test)] - pub(crate) fn is_subscribed( - &self, - subnet_id: &SubnetId, - subscription_kind: SubscriptionKind, - ) -> bool { - match subscription_kind { - SubscriptionKind::LongLived => self.long_lived_subscriptions.contains(subnet_id), - SubscriptionKind::ShortLived => self.short_lived_subscriptions.contains_key(subnet_id), - } - } - - #[cfg(test)] - pub(crate) fn long_lived_subscriptions(&self) -> &HashSet { - &self.long_lived_subscriptions - } - - /// Processes a list of validator subscriptions. - /// - /// This will: - /// - Register new validators as being known. - /// - Search for peers for required subnets. - /// - Request subscriptions for subnets on specific slots when required. - /// - Build the timeouts for each of these events. - /// - /// This returns a result simply for the ergonomics of using ?. The result can be - /// safely dropped. - pub fn validator_subscriptions( - &mut self, - subscriptions: impl Iterator, - ) -> Result<(), String> { - // If the node is in a proposer-only state, we ignore all subnet subscriptions. 
- if self.proposer_only { - return Ok(()); - } - - // Maps each subnet_id subscription to it's highest slot - let mut subnets_to_discover: HashMap = HashMap::new(); - - // Registers the validator with the attestation service. - for subscription in subscriptions { - metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS); - - trace!(self.log, - "Validator subscription"; - "subscription" => ?subscription, - ); - - // Compute the subnet that is associated with this subscription - let subnet_id = match SubnetId::compute_subnet::( - subscription.slot, - subscription.attestation_committee_index, - subscription.committee_count_at_slot, - &self.beacon_chain.spec, - ) { - Ok(subnet_id) => subnet_id, - Err(e) => { - warn!(self.log, - "Failed to compute subnet id for validator subscription"; - "error" => ?e, - ); - continue; - } - }; - // Ensure each subnet_id inserted into the map has the highest slot as it's value. - // Higher slot corresponds to higher min_ttl in the `SubnetDiscovery` entry. - if let Some(slot) = subnets_to_discover.get(&subnet_id) { - if subscription.slot > *slot { - subnets_to_discover.insert(subnet_id, subscription.slot); - } - } else if !self.discovery_disabled { - subnets_to_discover.insert(subnet_id, subscription.slot); - } - - let exact_subnet = ExactSubnet { - subnet_id, - slot: subscription.slot, - }; - - // Determine if the validator is an aggregator. If so, we subscribe to the subnet and - // if successful add the validator to a mapping of known aggregators for that exact - // subnet. 
- - if subscription.is_aggregator { - metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS); - if let Err(e) = self.subscribe_to_short_lived_subnet(exact_subnet) { - warn!(self.log, - "Subscription to subnet error"; - "error" => e, - ); - } else { - trace!(self.log, - "Subscribed to subnet for aggregator duties"; - "exact_subnet" => ?exact_subnet, - ); - } - } - } - - // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the - // required subnets. - if !self.discovery_disabled { - if let Err(e) = self.discover_peers_request( - subnets_to_discover - .into_iter() - .map(|(subnet_id, slot)| ExactSubnet { subnet_id, slot }), - ) { - warn!(self.log, "Discovery lookup request error"; "error" => e); - }; - } - - Ok(()) - } - - fn recompute_long_lived_subnets(&mut self) { - // Ensure the next computation is scheduled even if assigning subnets fails. - let next_subscription_event = self - .recompute_long_lived_subnets_inner() - .unwrap_or_else(|_| self.beacon_chain.slot_clock.slot_duration()); - - debug!(self.log, "Recomputing deterministic long lived subnets"); - self.next_long_lived_subscription_event = - Box::pin(tokio::time::sleep(next_subscription_event)); - - if let Some(waker) = self.waker.as_ref() { - waker.wake_by_ref(); - } - } - - /// Gets the long lived subnets the node should be subscribed to during the current epoch and - /// the remaining duration for which they remain valid. 
- fn recompute_long_lived_subnets_inner(&mut self) -> Result { - let current_epoch = self.beacon_chain.epoch().map_err(|e| { - if !self - .beacon_chain - .slot_clock - .is_prior_to_genesis() - .unwrap_or(false) - { - error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e) - } - })?; - - let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( - self.node_id.raw(), - current_epoch, - &self.beacon_chain.spec, - ) - .map_err(|e| error!(self.log, "Could not compute subnets for current epoch"; "err" => e))?; - - let next_subscription_slot = - next_subscription_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let next_subscription_event = self - .beacon_chain - .slot_clock - .duration_to_slot(next_subscription_slot) - .ok_or_else(|| { - error!( - self.log, - "Failed to compute duration to next to long lived subscription event" - ) - })?; - - self.update_long_lived_subnets(subnets.collect()); - - Ok(next_subscription_event) - } - - /// Updates the long lived subnets. - /// - /// New subnets are registered as subscribed, removed subnets as unsubscribed and the Enr - /// updated accordingly. - fn update_long_lived_subnets(&mut self, mut subnets: HashSet) { - info!(self.log, "Subscribing to long-lived subnets"; "subnets" => ?subnets.iter().collect::>()); - for subnet in &subnets { - // Add the events for those subnets that are new as long lived subscriptions. - if !self.long_lived_subscriptions.contains(subnet) { - // Check if this subnet is new and send the subscription event if needed. 
- if !self.short_lived_subscriptions.contains_key(subnet) { - debug!(self.log, "Subscribing to subnet"; - "subnet" => ?subnet, - "subscription_kind" => ?SubscriptionKind::LongLived, - ); - self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( - *subnet, - ))); - } - self.queue_event(SubnetServiceMessage::EnrAdd(Subnet::Attestation(*subnet))); - if !self.discovery_disabled { - self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { - subnet: Subnet::Attestation(*subnet), - min_ttl: None, - }])) - } - } - } - - // Update the long_lived_subnets set and check for subnets that are being removed - std::mem::swap(&mut self.long_lived_subscriptions, &mut subnets); - for subnet in subnets { - if !self.long_lived_subscriptions.contains(&subnet) { - self.handle_removed_subnet(subnet, SubscriptionKind::LongLived); - } - } - } - - /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip - /// verification, re-propagates and returns false. - pub fn should_process_attestation( - &self, - subnet: SubnetId, - attestation: &Attestation, - ) -> bool { - // Proposer-only mode does not need to process attestations - if self.proposer_only { - return false; - } - self.aggregate_validators_on_subnet - .as_ref() - .map(|tracked_vals| { - tracked_vals.contains_key(&ExactSubnet { - subnet_id: subnet, - slot: attestation.data().slot, - }) - }) - .unwrap_or(true) - } - - /* Internal private functions */ - - /// Adds an event to the event queue and notifies that this service is ready to be polled - /// again. - fn queue_event(&mut self, ev: SubnetServiceMessage) { - self.events.push_back(ev); - if let Some(waker) = &self.waker { - waker.wake_by_ref() - } - } - /// Checks if there are currently queued discovery requests and the time required to make the - /// request. - /// - /// If there is sufficient time, queues a peer discovery request for all the required subnets. 
- fn discover_peers_request( - &mut self, - exact_subnets: impl Iterator, - ) -> Result<(), &'static str> { - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; - - let discovery_subnets: Vec = exact_subnets - .filter_map(|exact_subnet| { - // Check if there is enough time to perform a discovery lookup. - if exact_subnet.slot - >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) - { - // Send out an event to start looking for peers. - // Require the peer for an additional slot to ensure we keep the peer for the - // duration of the subscription. - let min_ttl = self - .beacon_chain - .slot_clock - .duration_to_slot(exact_subnet.slot + 1) - .map(|duration| std::time::Instant::now() + duration); - Some(SubnetDiscovery { - subnet: Subnet::Attestation(exact_subnet.subnet_id), - min_ttl, - }) - } else { - // We may want to check the global PeerInfo to see estimated timeouts for each - // peer before they can be removed. - warn!(self.log, - "Not enough time for a discovery search"; - "subnet_id" => ?exact_subnet - ); - None - } - }) - .collect(); - - if !discovery_subnets.is_empty() { - self.queue_event(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); - } - Ok(()) - } - - // Subscribes to the subnet if it should be done immediately, or schedules it if required. - fn subscribe_to_short_lived_subnet( - &mut self, - ExactSubnet { subnet_id, slot }: ExactSubnet, - ) -> Result<(), &'static str> { - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - - // The short time we schedule the subscription before it's actually required. This - // ensures we are subscribed on time, and allows consecutive subscriptions to the same - // subnet to overlap, reducing subnet churn. - let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; - // The time to the required slot. 
- let time_to_subscription_slot = self - .beacon_chain - .slot_clock - .duration_to_slot(slot) - .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. - - // Calculate how long before we need to subscribe to the subnet. - let time_to_subscription_start = - time_to_subscription_slot.saturating_sub(advance_subscription_duration); - - // The time after a duty slot where we no longer need it in the `aggregate_validators_on_subnet` - // delay map. - let time_to_unsubscribe = - time_to_subscription_slot + UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY * slot_duration; - if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - tracked_vals.insert_at(ExactSubnet { subnet_id, slot }, time_to_unsubscribe); - } - - // If the subscription should be done in the future, schedule it. Otherwise subscribe - // immediately. - if time_to_subscription_start.is_zero() { - // This is a current or past slot, we subscribe immediately. - self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1)?; - } else { - // This is a future slot, schedule subscribing. - trace!(self.log, "Scheduling subnet subscription"; "subnet" => ?subnet_id, "time_to_subscription_start" => ?time_to_subscription_start); - self.scheduled_short_lived_subscriptions - .insert_at(ExactSubnet { subnet_id, slot }, time_to_subscription_start); - } - - Ok(()) - } - - /* A collection of functions that handle the various timeouts */ - - /// Registers a subnet as subscribed. - /// - /// Checks that the time in which the subscription would end is not in the past. If we are - /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send - /// out the appropriate events. - /// - /// On determinist long lived subnets, this is only used for short lived subscriptions. 
- fn subscribe_to_short_lived_subnet_immediately( - &mut self, - subnet_id: SubnetId, - end_slot: Slot, - ) -> Result<(), &'static str> { - if self.subscribe_all_subnets { - // Case not handled by this service. - return Ok(()); - } - - let time_to_subscription_end = self - .beacon_chain - .slot_clock - .duration_to_slot(end_slot) - .unwrap_or_default(); - - // First check this is worth doing. - if time_to_subscription_end.is_zero() { - return Err("Time when subscription would end has already passed."); - } - - let subscription_kind = SubscriptionKind::ShortLived; - - // We need to check and add a subscription for the right kind, regardless of the presence - // of the subnet as a subscription of the other kind. This is mainly since long lived - // subscriptions can be removed at any time when a validator goes offline. - - let (subscriptions, already_subscribed_as_other_kind) = ( - &mut self.short_lived_subscriptions, - self.long_lived_subscriptions.contains(&subnet_id), - ); - - match subscriptions.get(&subnet_id) { - Some(current_end_slot) => { - // We are already subscribed. Check if we need to extend the subscription. - if &end_slot > current_end_slot { - trace!(self.log, "Extending subscription to subnet"; - "subnet" => ?subnet_id, - "prev_end_slot" => current_end_slot, - "new_end_slot" => end_slot, - "subscription_kind" => ?subscription_kind, - ); - subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); - } - } - None => { - // This is a new subscription. Add with the corresponding timeout and send the - // notification. - subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); - - // Inform of the subscription. 
- if !already_subscribed_as_other_kind { - debug!(self.log, "Subscribing to subnet"; - "subnet" => ?subnet_id, - "end_slot" => end_slot, - "subscription_kind" => ?subscription_kind, - ); - self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( - subnet_id, - ))); - } - } - } - - Ok(()) - } - - // Unsubscribes from a subnet that was removed if it does not continue to exist as a - // subscription of the other kind. For long lived subscriptions, it also removes the - // advertisement from our ENR. - fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) { - let exists_in_other_subscriptions = match subscription_kind { - SubscriptionKind::LongLived => self.short_lived_subscriptions.contains_key(&subnet_id), - SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains(&subnet_id), - }; - - if !exists_in_other_subscriptions { - // Subscription no longer exists as short lived or long lived. - debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet_id, "subscription_kind" => ?subscription_kind); - self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - subnet_id, - ))); - } - - if subscription_kind == SubscriptionKind::LongLived { - // Remove from our ENR even if we remain subscribed in other way. - self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation( - subnet_id, - ))); - } - } -} - -impl Stream for AttestationService { - type Item = SubnetServiceMessage; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // Update the waker if needed. - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } - - // Send out any generated events. 
- if let Some(event) = self.events.pop_front() { - return Poll::Ready(Some(event)); - } - - // If we aren't subscribed to all subnets, handle the deterministic long-lived subnets - if !self.subscribe_all_subnets { - match self.next_long_lived_subscription_event.as_mut().poll(cx) { - Poll::Ready(_) => { - self.recompute_long_lived_subnets(); - // We re-wake the task as there could be other subscriptions to process - self.waker - .as_ref() - .expect("Waker has been set") - .wake_by_ref(); - } - Poll::Pending => {} - } - } - - // Process scheduled subscriptions that might be ready, since those can extend a soon to - // expire subscription. - match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => { - if let Err(e) = - self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1) - { - debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet_id, "err" => e); - } - self.waker - .as_ref() - .expect("Waker has been set") - .wake_by_ref(); - } - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for scheduled subnet subscriptions"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // Finally process any expired subscriptions. - match self.short_lived_subscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { - self.handle_removed_subnet(subnet_id, SubscriptionKind::ShortLived); - // We re-wake the task as there could be other subscriptions to process - self.waker - .as_ref() - .expect("Waker has been set") - .wake_by_ref(); - } - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // Poll to remove entries on expiration, no need to act on expiration events. 
- if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { - error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e); - } - } - - Poll::Pending - } -} diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index 6450fc72ee..da1f220f04 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -1,10 +1,25 @@ -pub mod attestation_subnets; -pub mod sync_subnets; +//! This service keeps track of which shard subnet the beacon node should be subscribed to at any +//! given time. It schedules subscriptions to shard subnets, requests peer discoveries and +//! determines whether attestations should be aggregated and/or passed to the beacon node. -use lighthouse_network::{Subnet, SubnetDiscovery}; +use std::collections::HashSet; +use std::collections::{HashMap, VecDeque}; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; +use tokio::time::Instant; -pub use attestation_subnets::AttestationService; -pub use sync_subnets::SyncCommitteeService; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use delay_map::HashSetDelay; +use futures::prelude::*; +use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDiscovery}; +use slog::{debug, error, o, warn}; +use slot_clock::SlotClock; +use types::{ + Attestation, EthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, + ValidatorSubscription, +}; #[cfg(test)] mod tests; @@ -17,12 +32,646 @@ pub enum SubnetServiceMessage { Unsubscribe(Subnet), /// Add the `SubnetId` to the ENR bitfield. EnrAdd(Subnet), - /// Remove the `SubnetId` from the ENR bitfield. - EnrRemove(Subnet), + /// Remove a sync committee subnet from the ENR. + EnrRemove(SyncSubnetId), /// Discover peers for a list of `SubnetDiscovery`. 
DiscoverPeers(Vec), } +use crate::metrics; + +/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the +/// slot is less than this number, skip the peer discovery process. +/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. +pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; +/// The fraction of a slot that we subscribe to a subnet before the required slot. +/// +/// Currently a whole slot ahead. +const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; + +/// The number of slots after an aggregator duty where we remove the entry from +/// `aggregate_validators_on_subnet` delay map. +const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2; + +/// A particular subnet at a given slot. This is used for Attestation subnets and not for sync +/// committee subnets because the logic for handling subscriptions between these types is different. +#[derive(PartialEq, Eq, Hash, Clone, Debug, Copy)] +pub struct ExactSubnet { + /// The `SubnetId` associated with this subnet. + pub subnet: Subnet, + /// For Attestations, this slot represents the start time at which we need to subscribe to the + /// slot. + pub slot: Slot, +} + +/// The enum used to group all kinds of validator subscriptions +#[derive(Debug, Clone, PartialEq)] +pub enum Subscription { + Attestation(ValidatorSubscription), + SyncCommittee(SyncCommitteeSubscription), +} + +pub struct SubnetService { + /// Queued events to return to the driving service. + events: VecDeque, + + /// A reference to the beacon chain to process received attestations. + pub(crate) beacon_chain: Arc>, + + /// Subnets we are currently subscribed to as short lived subscriptions. + /// + /// Once they expire, we unsubscribe from these. + /// We subscribe to subnets when we are an aggregator for an exact subnet. + // NOTE: When setup the default timeout is set for sync committee subscriptions. + subscriptions: HashSetDelay, + + /// Subscriptions that need to be executed in the future. 
+ scheduled_subscriptions: HashSetDelay, + + /// A list of permanent subnets that this node is subscribed to. + // TODO: Shift this to a dynamic bitfield + permanent_attestation_subscriptions: HashSet, + + /// A collection timeouts to track the existence of aggregate validator subscriptions at an + /// `ExactSubnet`. + aggregate_validators_on_subnet: Option>, + + /// The waker for the current thread. + waker: Option, + + /// The discovery mechanism of lighthouse is disabled. + discovery_disabled: bool, + + /// We are always subscribed to all subnets. + subscribe_all_subnets: bool, + + /// Whether this node is a block proposer-only node. + proposer_only: bool, + + /// The logger for the attestation service. + log: slog::Logger, +} + +impl SubnetService { + /* Public functions */ + + /// Establish the service based on the passed configuration. + pub fn new( + beacon_chain: Arc>, + node_id: NodeId, + config: &NetworkConfig, + log: &slog::Logger, + ) -> Self { + let log = log.new(o!("service" => "subnet_service")); + + let slot_duration = beacon_chain.slot_clock.slot_duration(); + + if config.subscribe_all_subnets { + slog::info!(log, "Subscribing to all subnets"); + } + + // Build the list of known permanent subscriptions, so that we know not to subscribe or + // discover them. + let mut permanent_attestation_subscriptions = HashSet::default(); + if config.subscribe_all_subnets { + // We are subscribed to all subnets, set all the bits to true. + for index in 0..beacon_chain.spec.attestation_subnet_count { + permanent_attestation_subscriptions + .insert(Subnet::Attestation(SubnetId::from(index))); + } + } else { + // Not subscribed to all subnets, so just calculate the required subnets from the node + // id. 
+ for subnet_id in + SubnetId::compute_attestation_subnets(node_id.raw(), &beacon_chain.spec) + { + permanent_attestation_subscriptions.insert(Subnet::Attestation(subnet_id)); + } + } + + // Set up the sync committee subscriptions + let spec = &beacon_chain.spec; + let epoch_duration_secs = + beacon_chain.slot_clock.slot_duration().as_secs() * T::EthSpec::slots_per_epoch(); + let default_sync_committee_duration = Duration::from_secs( + epoch_duration_secs.saturating_mul(spec.epochs_per_sync_committee_period.as_u64()), + ); + + let track_validators = !config.import_all_attestations; + let aggregate_validators_on_subnet = + track_validators.then(|| HashSetDelay::new(slot_duration)); + + let mut events = VecDeque::with_capacity(10); + + // Queue discovery queries for the permanent attestation subnets + if !config.disable_discovery { + events.push_back(SubnetServiceMessage::DiscoverPeers( + permanent_attestation_subscriptions + .iter() + .cloned() + .map(|subnet| SubnetDiscovery { + subnet, + min_ttl: None, + }) + .collect(), + )); + } + + // Pre-populate the events with permanent subscriptions + for subnet in permanent_attestation_subscriptions.iter() { + events.push_back(SubnetServiceMessage::Subscribe(*subnet)); + events.push_back(SubnetServiceMessage::EnrAdd(*subnet)); + } + + SubnetService { + events, + beacon_chain, + subscriptions: HashSetDelay::new(default_sync_committee_duration), + permanent_attestation_subscriptions, + scheduled_subscriptions: HashSetDelay::default(), + aggregate_validators_on_subnet, + waker: None, + discovery_disabled: config.disable_discovery, + subscribe_all_subnets: config.subscribe_all_subnets, + proposer_only: config.proposer_only, + log, + } + } + + /// Return count of all currently subscribed short-lived subnets. 
+ #[cfg(test)] + pub fn subscriptions(&self) -> impl Iterator { + self.subscriptions.iter() + } + + #[cfg(test)] + pub fn permanent_subscriptions(&self) -> impl Iterator { + self.permanent_attestation_subscriptions.iter() + } + + /// Returns whether we are subscribed to a subnet for testing purposes. + #[cfg(test)] + pub(crate) fn is_subscribed(&self, subnet: &Subnet) -> bool { + self.subscriptions.contains_key(subnet) + || self.permanent_attestation_subscriptions.contains(subnet) + } + + /// Processes a list of validator subscriptions. + /// + /// This is fundamentally called form the HTTP API when a validator requests duties from us + /// This will: + /// - Register new validators as being known. + /// - Search for peers for required subnets. + /// - Request subscriptions for subnets on specific slots when required. + /// - Build the timeouts for each of these events. + /// + /// This returns a result simply for the ergonomics of using ?. The result can be + /// safely dropped. + pub fn validator_subscriptions(&mut self, subscriptions: impl Iterator) { + // If the node is in a proposer-only state, we ignore all subnet subscriptions. + if self.proposer_only { + return; + } + + // Maps each subnet subscription to it's highest slot + let mut subnets_to_discover: HashMap = HashMap::new(); + + // Registers the validator with the attestation service. 
+ for general_subscription in subscriptions { + match general_subscription { + Subscription::Attestation(subscription) => { + metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS); + + // Compute the subnet that is associated with this subscription + let subnet = match SubnetId::compute_subnet::( + subscription.slot, + subscription.attestation_committee_index, + subscription.committee_count_at_slot, + &self.beacon_chain.spec, + ) { + Ok(subnet_id) => Subnet::Attestation(subnet_id), + Err(e) => { + warn!(self.log, + "Failed to compute subnet id for validator subscription"; + "error" => ?e, + ); + continue; + } + }; + + // Ensure each subnet_id inserted into the map has the highest slot as it's value. + // Higher slot corresponds to higher min_ttl in the `SubnetDiscovery` entry. + if let Some(slot) = subnets_to_discover.get(&subnet) { + if subscription.slot > *slot { + subnets_to_discover.insert(subnet, subscription.slot); + } + } else if !self.discovery_disabled { + subnets_to_discover.insert(subnet, subscription.slot); + } + + let exact_subnet = ExactSubnet { + subnet, + slot: subscription.slot, + }; + + // Determine if the validator is an aggregator. If so, we subscribe to the subnet and + // if successful add the validator to a mapping of known aggregators for that exact + // subnet. + + if subscription.is_aggregator { + metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS); + if let Err(e) = self.subscribe_to_subnet(exact_subnet) { + warn!(self.log, + "Subscription to subnet error"; + "error" => e, + ); + } + } + } + Subscription::SyncCommittee(subscription) => { + metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS); + // NOTE: We assume all subscriptions have been verified before reaching this service + + // Registers the validator with the subnet service. 
+ let subnet_ids = + match SyncSubnetId::compute_subnets_for_sync_committee::( + &subscription.sync_committee_indices, + ) { + Ok(subnet_ids) => subnet_ids, + Err(e) => { + warn!(self.log, + "Failed to compute subnet id for sync committee subscription"; + "error" => ?e, + "validator_index" => subscription.validator_index + ); + continue; + } + }; + + for subnet_id in subnet_ids { + let subnet = Subnet::SyncCommittee(subnet_id); + let slot_required_until = subscription + .until_epoch + .start_slot(T::EthSpec::slots_per_epoch()); + subnets_to_discover.insert(subnet, slot_required_until); + + let Some(duration_to_unsubscribe) = self + .beacon_chain + .slot_clock + .duration_to_slot(slot_required_until) + else { + warn!(self.log, "Subscription to sync subnet error"; "error" => "Unable to determine duration to unsubscription slot", "validator_index" => subscription.validator_index); + continue; + }; + + if duration_to_unsubscribe == Duration::from_secs(0) { + let current_slot = self + .beacon_chain + .slot_clock + .now() + .unwrap_or(Slot::from(0u64)); + warn!( + self.log, + "Sync committee subscription is past expiration"; + "subnet" => ?subnet, + "current_slot" => ?current_slot, + "unsubscribe_slot" => ?slot_required_until, ); + continue; + } + + self.subscribe_to_sync_subnet( + subnet, + duration_to_unsubscribe, + slot_required_until, + ); + } + } + } + } + + // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the + // required subnets. + if !self.discovery_disabled { + if let Err(e) = self.discover_peers_request(subnets_to_discover.into_iter()) { + warn!(self.log, "Discovery lookup request error"; "error" => e); + }; + } + } + + /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip + /// verification, re-propagates and returns false. 
+ pub fn should_process_attestation( + &self, + subnet: Subnet, + attestation: &Attestation, + ) -> bool { + // Proposer-only mode does not need to process attestations + if self.proposer_only { + return false; + } + self.aggregate_validators_on_subnet + .as_ref() + .map(|tracked_vals| { + tracked_vals.contains_key(&ExactSubnet { + subnet, + slot: attestation.data().slot, + }) + }) + .unwrap_or(true) + } + + /* Internal private functions */ + + /// Adds an event to the event queue and notifies that this service is ready to be polled + /// again. + fn queue_event(&mut self, ev: SubnetServiceMessage) { + self.events.push_back(ev); + if let Some(waker) = &self.waker { + waker.wake_by_ref() + } + } + /// Checks if there are currently queued discovery requests and the time required to make the + /// request. + /// + /// If there is sufficient time, queues a peer discovery request for all the required subnets. + // NOTE: Sending early subscriptions results in early searching for peers on subnets. + fn discover_peers_request( + &mut self, + subnets_to_discover: impl Iterator, + ) -> Result<(), &'static str> { + let current_slot = self + .beacon_chain + .slot_clock + .now() + .ok_or("Could not get the current slot")?; + + let discovery_subnets: Vec = subnets_to_discover + .filter_map(|(subnet, relevant_slot)| { + // We generate discovery requests for all subnets (even one's we are permenantly + // subscribed to) in order to ensure our peer counts are satisfactory to perform the + // necessary duties. + + // Check if there is enough time to perform a discovery lookup. + if relevant_slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) + { + // Send out an event to start looking for peers. + // Require the peer for an additional slot to ensure we keep the peer for the + // duration of the subscription. 
+ let min_ttl = self + .beacon_chain + .slot_clock + .duration_to_slot(relevant_slot + 1) + .map(|duration| std::time::Instant::now() + duration); + Some(SubnetDiscovery { subnet, min_ttl }) + } else { + // We may want to check the global PeerInfo to see estimated timeouts for each + // peer before they can be removed. + warn!(self.log, + "Not enough time for a discovery search"; + "subnet_id" => ?subnet, + ); + None + } + }) + .collect(); + + if !discovery_subnets.is_empty() { + self.queue_event(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); + } + Ok(()) + } + + // Subscribes to the subnet if it should be done immediately, or schedules it if required. + fn subscribe_to_subnet( + &mut self, + ExactSubnet { subnet, slot }: ExactSubnet, + ) -> Result<(), &'static str> { + // If the subnet is one of our permanent subnets, we do not need to subscribe. + if self.subscribe_all_subnets || self.permanent_attestation_subscriptions.contains(&subnet) + { + return Ok(()); + } + + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); + + // The short time we schedule the subscription before it's actually required. This + // ensures we are subscribed on time, and allows consecutive subscriptions to the same + // subnet to overlap, reducing subnet churn. + let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; + // The time to the required slot. + let time_to_subscription_slot = self + .beacon_chain + .slot_clock + .duration_to_slot(slot) + .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. + + // Calculate how long before we need to subscribe to the subnet. + let time_to_subscription_start = + time_to_subscription_slot.saturating_sub(advance_subscription_duration); + + // The time after a duty slot where we no longer need it in the `aggregate_validators_on_subnet` + // delay map. 
+ let time_to_unsubscribe = + time_to_subscription_slot + UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY * slot_duration; + if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { + tracked_vals.insert_at(ExactSubnet { subnet, slot }, time_to_unsubscribe); + } + + // If the subscription should be done in the future, schedule it. Otherwise subscribe + // immediately. + if time_to_subscription_start.is_zero() { + // This is a current or past slot, we subscribe immediately. + self.subscribe_to_subnet_immediately(subnet, slot + 1)?; + } else { + // This is a future slot, schedule subscribing. + // We need to include the slot to make the key unique to prevent overwriting the entry + // for the same subnet. + self.scheduled_subscriptions + .insert_at(ExactSubnet { subnet, slot }, time_to_subscription_start); + } + + Ok(()) + } + + /// Adds a subscription event to the sync subnet. + fn subscribe_to_sync_subnet( + &mut self, + subnet: Subnet, + duration_to_unsubscribe: Duration, + slot_required_until: Slot, + ) { + // Return if we have subscribed to all subnets + if self.subscribe_all_subnets { + return; + } + + // Update the unsubscription duration if we already have a subscription for the subnet + if let Some(current_instant_to_unsubscribe) = self.subscriptions.deadline(&subnet) { + // The extra 500ms in the comparison accounts of the inaccuracy of the underlying + // DelayQueue inside the delaymap struct. 
+ let current_duration_to_unsubscribe = (current_instant_to_unsubscribe + + Duration::from_millis(500)) + .checked_duration_since(Instant::now()) + .unwrap_or(Duration::from_secs(0)); + + if duration_to_unsubscribe > current_duration_to_unsubscribe { + self.subscriptions + .update_timeout(&subnet, duration_to_unsubscribe); + } + } else { + // We have not subscribed before, so subscribe + self.subscriptions + .insert_at(subnet, duration_to_unsubscribe); + // We are not currently subscribed and have no waiting subscription, create one + debug!(self.log, "Subscribing to subnet"; "subnet" => ?subnet, "until" => ?slot_required_until); + self.events + .push_back(SubnetServiceMessage::Subscribe(subnet)); + + // add the sync subnet to the ENR bitfield + self.events.push_back(SubnetServiceMessage::EnrAdd(subnet)); + } + } + + /* A collection of functions that handle the various timeouts */ + + /// Registers a subnet as subscribed. + /// + /// Checks that the time in which the subscription would end is not in the past. If we are + /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send + /// out the appropriate events. + fn subscribe_to_subnet_immediately( + &mut self, + subnet: Subnet, + end_slot: Slot, + ) -> Result<(), &'static str> { + if self.subscribe_all_subnets { + // Case not handled by this service. + return Ok(()); + } + + let time_to_subscription_end = self + .beacon_chain + .slot_clock + .duration_to_slot(end_slot) + .unwrap_or_default(); + + // First check this is worth doing. + if time_to_subscription_end.is_zero() { + return Err("Time when subscription would end has already passed."); + } + + // Check if we already have this subscription. If we do, optionally update the timeout of + // when we need the subscription, otherwise leave as is. + // If this is a new subscription simply add it to our mapping and subscribe. 
+ match self.subscriptions.deadline(&subnet) { + Some(current_end_slot_time) => { + // We are already subscribed. Check if we need to extend the subscription. + if current_end_slot_time + .checked_duration_since(Instant::now()) + .unwrap_or(Duration::from_secs(0)) + < time_to_subscription_end + { + self.subscriptions + .update_timeout(&subnet, time_to_subscription_end); + } + } + None => { + // This is a new subscription. Add with the corresponding timeout and send the + // notification. + self.subscriptions + .insert_at(subnet, time_to_subscription_end); + + // Inform of the subscription. + debug!(self.log, "Subscribing to subnet"; + "subnet" => ?subnet, + "end_slot" => end_slot, + ); + self.queue_event(SubnetServiceMessage::Subscribe(subnet)); + } + } + Ok(()) + } + + // Unsubscribes from a subnet that was removed. + fn handle_removed_subnet(&mut self, subnet: Subnet) { + if !self.subscriptions.contains_key(&subnet) { + // Subscription no longer exists as short lived subnet + debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet); + self.queue_event(SubnetServiceMessage::Unsubscribe(subnet)); + + // If this is a sync subnet, we need to remove it from our ENR. + if let Subnet::SyncCommittee(sync_subnet_id) = subnet { + self.queue_event(SubnetServiceMessage::EnrRemove(sync_subnet_id)); + } + } + } +} + +impl Stream for SubnetService { + type Item = SubnetServiceMessage; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + // Update the waker if needed. + if let Some(waker) = &self.waker { + if waker.will_wake(cx.waker()) { + self.waker = Some(cx.waker().clone()); + } + } else { + self.waker = Some(cx.waker().clone()); + } + + // Send out any generated events. + if let Some(event) = self.events.pop_front() { + return Poll::Ready(Some(event)); + } + + // Process scheduled subscriptions that might be ready, since those can extend a soon to + // expire subscription. 
+ match self.scheduled_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(exact_subnet))) => { + let ExactSubnet { subnet, .. } = exact_subnet; + let current_slot = self.beacon_chain.slot_clock.now().unwrap_or_default(); + if let Err(e) = self.subscribe_to_subnet_immediately(subnet, current_slot + 1) { + debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet, "err" => e); + } + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for scheduled subnet subscriptions"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Process any expired subscriptions. + match self.subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(subnet))) => { + self.handle_removed_subnet(subnet); + // We re-wake the task as there could be other subscriptions to process + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Poll to remove entries on expiration, no need to act on expiration events. + if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { + if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { + error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e); + } + } + + Poll::Pending + } +} + /// Note: This `PartialEq` impl is for use only in tests. /// The `DiscoverPeers` comparison is good enough for testing only. 
#[cfg(test)] @@ -32,7 +681,6 @@ impl PartialEq for SubnetServiceMessage { (SubnetServiceMessage::Subscribe(a), SubnetServiceMessage::Subscribe(b)) => a == b, (SubnetServiceMessage::Unsubscribe(a), SubnetServiceMessage::Unsubscribe(b)) => a == b, (SubnetServiceMessage::EnrAdd(a), SubnetServiceMessage::EnrAdd(b)) => a == b, - (SubnetServiceMessage::EnrRemove(a), SubnetServiceMessage::EnrRemove(b)) => a == b, (SubnetServiceMessage::DiscoverPeers(a), SubnetServiceMessage::DiscoverPeers(b)) => { if a.len() != b.len() { return false; diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs deleted file mode 100644 index eda7ce8efb..0000000000 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ /dev/null @@ -1,359 +0,0 @@ -//! This service keeps track of which sync committee subnet the beacon node should be subscribed to at any -//! given time. It schedules subscriptions to sync committee subnets and requests peer discoveries. - -use std::collections::{hash_map::Entry, HashMap, VecDeque}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Duration; - -use futures::prelude::*; -use slog::{debug, error, o, trace, warn}; - -use super::SubnetServiceMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use delay_map::HashSetDelay; -use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; -use slot_clock::SlotClock; -use types::{Epoch, EthSpec, SyncCommitteeSubscription, SyncSubnetId}; - -use crate::metrics; - -/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the -/// slot is less than this number, skip the peer discovery process. -/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. -const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; - -/// A particular subnet at a given slot. 
-#[derive(PartialEq, Eq, Hash, Clone, Debug)] -pub struct ExactSubnet { - /// The `SyncSubnetId` associated with this subnet. - pub subnet_id: SyncSubnetId, - /// The epoch until which we need to stay subscribed to the subnet. - pub until_epoch: Epoch, -} -pub struct SyncCommitteeService { - /// Queued events to return to the driving service. - events: VecDeque, - - /// A reference to the beacon chain to process received attestations. - pub(crate) beacon_chain: Arc>, - - /// The collection of all currently subscribed subnets. - subscriptions: HashMap, - - /// A collection of timeouts for when to unsubscribe from a subnet. - unsubscriptions: HashSetDelay, - - /// The waker for the current thread. - waker: Option, - - /// The discovery mechanism of lighthouse is disabled. - discovery_disabled: bool, - - /// We are always subscribed to all subnets. - subscribe_all_subnets: bool, - - /// Whether this node is a block proposer-only node. - proposer_only: bool, - - /// The logger for the attestation service. - log: slog::Logger, -} - -impl SyncCommitteeService { - /* Public functions */ - - pub fn new( - beacon_chain: Arc>, - config: &NetworkConfig, - log: &slog::Logger, - ) -> Self { - let log = log.new(o!("service" => "sync_committee_service")); - - let spec = &beacon_chain.spec; - let epoch_duration_secs = - beacon_chain.slot_clock.slot_duration().as_secs() * T::EthSpec::slots_per_epoch(); - let default_timeout = - epoch_duration_secs.saturating_mul(spec.epochs_per_sync_committee_period.as_u64()); - - SyncCommitteeService { - events: VecDeque::with_capacity(10), - beacon_chain, - subscriptions: HashMap::new(), - unsubscriptions: HashSetDelay::new(Duration::from_secs(default_timeout)), - waker: None, - subscribe_all_subnets: config.subscribe_all_subnets, - discovery_disabled: config.disable_discovery, - proposer_only: config.proposer_only, - log, - } - } - - /// Return count of all currently subscribed subnets. 
- #[cfg(test)] - pub fn subscription_count(&self) -> usize { - use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; - if self.subscribe_all_subnets { - SYNC_COMMITTEE_SUBNET_COUNT as usize - } else { - self.subscriptions.len() - } - } - - /// Processes a list of sync committee subscriptions. - /// - /// This will: - /// - Search for peers for required subnets. - /// - Request subscriptions required subnets. - /// - Build the timeouts for each of these events. - /// - /// This returns a result simply for the ergonomics of using ?. The result can be - /// safely dropped. - pub fn validator_subscriptions( - &mut self, - subscriptions: Vec, - ) -> Result<(), String> { - // A proposer-only node does not subscribe to any sync-committees - if self.proposer_only { - return Ok(()); - } - - let mut subnets_to_discover = Vec::new(); - for subscription in subscriptions { - metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS); - //NOTE: We assume all subscriptions have been verified before reaching this service - - // Registers the validator with the subnet service. - // This will subscribe to long-lived random subnets if required. 
- trace!(self.log, - "Sync committee subscription"; - "subscription" => ?subscription, - ); - - let subnet_ids = match SyncSubnetId::compute_subnets_for_sync_committee::( - &subscription.sync_committee_indices, - ) { - Ok(subnet_ids) => subnet_ids, - Err(e) => { - warn!(self.log, - "Failed to compute subnet id for sync committee subscription"; - "error" => ?e, - "validator_index" => subscription.validator_index - ); - continue; - } - }; - - for subnet_id in subnet_ids { - let exact_subnet = ExactSubnet { - subnet_id, - until_epoch: subscription.until_epoch, - }; - subnets_to_discover.push(exact_subnet.clone()); - if let Err(e) = self.subscribe_to_subnet(exact_subnet.clone()) { - warn!(self.log, - "Subscription to sync subnet error"; - "error" => e, - "validator_index" => subscription.validator_index, - ); - } else { - trace!(self.log, - "Subscribed to subnet for sync committee duties"; - "exact_subnet" => ?exact_subnet, - "validator_index" => subscription.validator_index - ); - } - } - } - // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the - // required subnets. - if !self.discovery_disabled { - if let Err(e) = self.discover_peers_request(subnets_to_discover.iter()) { - warn!(self.log, "Discovery lookup request error"; "error" => e); - }; - } - - // pre-emptively wake the thread to check for new events - if let Some(waker) = &self.waker { - waker.wake_by_ref(); - } - Ok(()) - } - - /* Internal private functions */ - - /// Checks if there are currently queued discovery requests and the time required to make the - /// request. - /// - /// If there is sufficient time, queues a peer discovery request for all the required subnets. 
- fn discover_peers_request<'a>( - &mut self, - exact_subnets: impl Iterator, - ) -> Result<(), &'static str> { - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; - - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - - let discovery_subnets: Vec = exact_subnets - .filter_map(|exact_subnet| { - let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); - // check if there is enough time to perform a discovery lookup - if until_slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) { - // if the slot is more than epoch away, add an event to start looking for peers - // add one slot to ensure we keep the peer for the subscription slot - let min_ttl = self - .beacon_chain - .slot_clock - .duration_to_slot(until_slot + 1) - .map(|duration| std::time::Instant::now() + duration); - Some(SubnetDiscovery { - subnet: Subnet::SyncCommittee(exact_subnet.subnet_id), - min_ttl, - }) - } else { - // We may want to check the global PeerInfo to see estimated timeouts for each - // peer before they can be removed. - warn!(self.log, - "Not enough time for a discovery search"; - "subnet_id" => ?exact_subnet - ); - None - } - }) - .collect(); - - if !discovery_subnets.is_empty() { - self.events - .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); - } - Ok(()) - } - - /// Adds a subscription event and an associated unsubscription event if required. - fn subscribe_to_subnet(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { - // Return if we have subscribed to all subnets - if self.subscribe_all_subnets { - return Ok(()); - } - - // Return if we already have a subscription for exact_subnet - if self.subscriptions.get(&exact_subnet.subnet_id) == Some(&exact_subnet.until_epoch) { - return Ok(()); - } - - // Return if we already have subscription set to expire later than the current request. 
- if let Some(until_epoch) = self.subscriptions.get(&exact_subnet.subnet_id) { - if *until_epoch >= exact_subnet.until_epoch { - return Ok(()); - } - } - - // initialise timing variables - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; - - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); - // Calculate the duration to the unsubscription event. - let expected_end_subscription_duration = if current_slot >= until_slot { - warn!( - self.log, - "Sync committee subscription is past expiration"; - "current_slot" => current_slot, - "exact_subnet" => ?exact_subnet, - ); - return Ok(()); - } else { - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - - // the duration until we no longer need this subscription. We assume a single slot is - // sufficient. - self.beacon_chain - .slot_clock - .duration_to_slot(until_slot) - .ok_or("Unable to determine duration to unsubscription slot")? 
- + slot_duration - }; - - if let Entry::Vacant(e) = self.subscriptions.entry(exact_subnet.subnet_id) { - // We are not currently subscribed and have no waiting subscription, create one - debug!(self.log, "Subscribing to subnet"; "subnet" => *exact_subnet.subnet_id, "until_epoch" => ?exact_subnet.until_epoch); - e.insert(exact_subnet.until_epoch); - self.events - .push_back(SubnetServiceMessage::Subscribe(Subnet::SyncCommittee( - exact_subnet.subnet_id, - ))); - - // add the subnet to the ENR bitfield - self.events - .push_back(SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee( - exact_subnet.subnet_id, - ))); - - // add an unsubscription event to remove ourselves from the subnet once completed - self.unsubscriptions - .insert_at(exact_subnet.subnet_id, expected_end_subscription_duration); - } else { - // We are already subscribed, extend the unsubscription duration - self.unsubscriptions - .update_timeout(&exact_subnet.subnet_id, expected_end_subscription_duration); - } - - Ok(()) - } - - /// A queued unsubscription is ready. 
- fn handle_unsubscriptions(&mut self, subnet_id: SyncSubnetId) { - debug!(self.log, "Unsubscribing from subnet"; "subnet" => *subnet_id); - - self.subscriptions.remove(&subnet_id); - self.events - .push_back(SubnetServiceMessage::Unsubscribe(Subnet::SyncCommittee( - subnet_id, - ))); - - self.events - .push_back(SubnetServiceMessage::EnrRemove(Subnet::SyncCommittee( - subnet_id, - ))); - } -} - -impl Stream for SyncCommitteeService { - type Item = SubnetServiceMessage; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // update the waker if needed - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } - - // process any un-subscription events - match self.unsubscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet), - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // process any generated events - if let Some(event) = self.events.pop_front() { - return Poll::Ready(Some(event)); - } - - Poll::Pending - } -} diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index a784b05ea7..7283b4af31 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -5,9 +5,9 @@ use beacon_chain::{ test_utils::get_kzg, BeaconChain, }; -use futures::prelude::*; use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::NetworkConfig; +use logging::test_logger; use slog::{o, Drain, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::{SlotClock, SystemTimeSlotClock}; @@ -21,6 +21,10 @@ use types::{ SyncCommitteeSubscription, SyncSubnetId, 
ValidatorSubscription, }; +// Set to enable/disable logging +// const TEST_LOG_LEVEL: Option = Some(slog::Level::Debug); +const TEST_LOG_LEVEL: Option = None; + const SLOT_DURATION_MILLIS: u64 = 400; type TestBeaconChainType = Witness< @@ -42,7 +46,7 @@ impl TestBeaconChain { let keypairs = generate_deterministic_keypairs(1); - let log = get_logger(None); + let log = get_logger(TEST_LOG_LEVEL); let store = HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); @@ -114,15 +118,13 @@ fn get_logger(log_level: Option) -> Logger { static CHAIN: LazyLock = LazyLock::new(TestBeaconChain::new_with_system_clock); -fn get_attestation_service( - log_level: Option, -) -> AttestationService { - let log = get_logger(log_level); +fn get_subnet_service() -> SubnetService { + let log = test_logger(); let config = NetworkConfig::default(); let beacon_chain = CHAIN.chain.clone(); - AttestationService::new( + SubnetService::new( beacon_chain, lighthouse_network::discv5::enr::NodeId::random(), &config, @@ -130,15 +132,6 @@ fn get_attestation_service( ) } -fn get_sync_committee_service() -> SyncCommitteeService { - let log = get_logger(None); - let config = NetworkConfig::default(); - - let beacon_chain = CHAIN.chain.clone(); - - SyncCommitteeService::new(beacon_chain, &config, &log) -} - // gets a number of events from the subscription service, or returns none if it times out after a number // of slots async fn get_events + Unpin>( @@ -172,10 +165,10 @@ async fn get_events + Unpin>( events } -mod attestation_service { +mod test { #[cfg(not(windows))] - use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; + use crate::subnet_service::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; use super::*; @@ -184,13 +177,13 @@ mod attestation_service { slot: Slot, committee_count_at_slot: u64, is_aggregator: bool, - ) -> ValidatorSubscription { - ValidatorSubscription { + ) -> Subscription { + Subscription::Attestation(ValidatorSubscription { 
attestation_committee_index, slot, committee_count_at_slot, is_aggregator, - } + }) } fn get_subscriptions( @@ -198,7 +191,7 @@ mod attestation_service { slot: Slot, committee_count_at_slot: u64, is_aggregator: bool, - ) -> Vec { + ) -> Vec { (0..validator_count) .map(|validator_index| { get_subscription( @@ -215,72 +208,77 @@ mod attestation_service { async fn subscribe_current_slot_wait_for_unsubscribe() { // subscription config let committee_index = 1; - // Keep a low subscription slot so that there are no additional subnet discovery events. - let subscription_slot = 0; - let committee_count = 1; let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + let mut subnet_service = get_subnet_service(); + let _events = get_events(&mut subnet_service, None, 1).await; + + let current_slot = subnet_service .beacon_chain .slot_clock .now() .expect("Could not get current slot"); + // Generate a subnet that isn't in our permanent subnet collection + let subscription_slot = current_slot + 1; + let mut committee_count = 1; + let mut subnet = Subnet::Attestation( + SubnetId::compute_subnet::( + subscription_slot, + committee_index, + committee_count, + &subnet_service.beacon_chain.spec, + ) + .unwrap(), + ); + while subnet_service + .permanent_subscriptions() + .any(|x| *x == subnet) + { + committee_count += 1; + subnet = Subnet::Attestation( + SubnetId::compute_subnet::( + subscription_slot, + committee_index, + committee_count, + &subnet_service.beacon_chain.spec, + ) + .unwrap(), + ); + } + let subscriptions = vec![get_subscription( committee_index, - current_slot + Slot::new(subscription_slot), + subscription_slot, committee_count, true, )]; // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions.into_iter()) - .unwrap(); + 
subnet_service.validator_subscriptions(subscriptions.into_iter()); // not enough time for peer discovery, just subscribe, unsubscribe - let subnet_id = SubnetId::compute_subnet::( - current_slot + Slot::new(subscription_slot), - committee_index, - committee_count, - &attestation_service.beacon_chain.spec, - ) - .unwrap(); let expected = [ - SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id)), - SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id)), + SubnetServiceMessage::Subscribe(subnet), + SubnetServiceMessage::Unsubscribe(subnet), ]; // Wait for 1 slot duration to get the unsubscription event let events = get_events( - &mut attestation_service, - Some(subnets_per_node * 3 + 2), - (MainnetEthSpec::slots_per_epoch() * 3) as u32, + &mut subnet_service, + Some(2), + (MainnetEthSpec::slots_per_epoch()) as u32, ) .await; - matches::assert_matches!( - events[..6], - [ - SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3), - SubnetServiceMessage::DiscoverPeers(_), - SubnetServiceMessage::Subscribe(_), - SubnetServiceMessage::EnrAdd(_), - SubnetServiceMessage::DiscoverPeers(_), - ] - ); + assert_eq!(events, expected); - // If the long lived and short lived subnets are the same, there should be no more events - // as we don't resubscribe already subscribed subnets. - if !attestation_service - .is_subscribed(&subnet_id, attestation_subnets::SubscriptionKind::LongLived) - { - assert_eq!(expected[..], events[subnets_per_node * 3..]); - } - // Should be subscribed to only subnets_per_node long lived subnet after unsubscription. - assert_eq!(attestation_service.subscription_count(), subnets_per_node); + // Should be subscribed to only subnets_per_node permananet subnet after unsubscription. 
+ assert_eq!( + subnet_service.permanent_subscriptions().count(), + subnets_per_node + ); + assert_eq!(subnet_service.subscriptions().count(), 0); } /// Test to verify that we are not unsubscribing to a subnet before a required subscription. @@ -289,7 +287,6 @@ mod attestation_service { async fn test_same_subnet_unsubscription() { // subscription config let committee_count = 1; - let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // Makes 2 validator subscriptions to the same subnet but at different slots. // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). @@ -298,9 +295,10 @@ mod attestation_service { let com1 = 1; let com2 = 0; - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + // create the subnet service and subscriptions + let mut subnet_service = get_subnet_service(); + let _events = get_events(&mut subnet_service, None, 0).await; + let current_slot = subnet_service .beacon_chain .slot_clock .now() @@ -324,7 +322,7 @@ mod attestation_service { current_slot + Slot::new(subscription_slot1), com1, committee_count, - &attestation_service.beacon_chain.spec, + &subnet_service.beacon_chain.spec, ) .unwrap(); @@ -332,7 +330,7 @@ mod attestation_service { current_slot + Slot::new(subscription_slot2), com2, committee_count, - &attestation_service.beacon_chain.spec, + &subnet_service.beacon_chain.spec, ) .unwrap(); @@ -341,110 +339,80 @@ mod attestation_service { assert_eq!(subnet_id1, subnet_id2); // submit the subscriptions - attestation_service - .validator_subscriptions(vec![sub1, sub2].into_iter()) - .unwrap(); + subnet_service.validator_subscriptions(vec![sub1, sub2].into_iter()); // Unsubscription event should happen at slot 2 (since subnet id's are the same, unsubscription event should be at higher slot + 1) - // Get all events for 1 slot duration (unsubscription event should 
happen after 2 slot durations). - let events = get_events(&mut attestation_service, None, 1).await; - matches::assert_matches!( - events[..3], - [ - SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3), - SubnetServiceMessage::DiscoverPeers(_), - ] - ); - let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); - // Should be still subscribed to 2 long lived and up to 1 short lived subnet if both are - // different. - if !attestation_service.is_subscribed( - &subnet_id1, - attestation_subnets::SubscriptionKind::LongLived, - ) { - // The index is 3*subnets_per_node (because we subscribe + discover + enr per long lived - // subnet) + 1 - let index = 3 * subnets_per_node; - assert_eq!(expected, events[index]); - assert_eq!( - attestation_service.subscription_count(), - subnets_per_node + 1 - ); + if subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + // If we are permanently subscribed to this subnet, we won't see a subscribe message + let _ = get_events(&mut subnet_service, None, 1).await; } else { - assert!(attestation_service.subscription_count() == subnets_per_node); + let subscription = get_events(&mut subnet_service, None, 1).await; + assert_eq!(subscription, [expected]); } // Get event for 1 more slot duration, we should get the unsubscribe event now. - let unsubscribe_event = get_events(&mut attestation_service, None, 1).await; + let unsubscribe_event = get_events(&mut subnet_service, None, 1).await; // If the long lived and short lived subnets are different, we should get an unsubscription // event. - if !attestation_service.is_subscribed( - &subnet_id1, - attestation_subnets::SubscriptionKind::LongLived, - ) { - assert_eq!( - [SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - subnet_id1 - ))], - unsubscribe_event[..] 
- ); + let expected = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + assert_eq!([expected], unsubscribe_event[..]); } - // Should be subscribed 2 long lived subnet after unsubscription. - assert_eq!(attestation_service.subscription_count(), subnets_per_node); + // Should no longer be subscribed to any short lived subnets after unsubscription. + assert_eq!(subnet_service.subscriptions().count(), 0); } #[tokio::test] async fn subscribe_all_subnets() { let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; let subscription_slot = 3; - let subscription_count = attestation_subnet_count; + let subscriptions_count = attestation_subnet_count; let committee_count = 1; let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + let mut subnet_service = get_subnet_service(); + let current_slot = subnet_service .beacon_chain .slot_clock .now() .expect("Could not get current slot"); let subscriptions = get_subscriptions( - subscription_count, + subscriptions_count, current_slot + subscription_slot, committee_count, true, ); // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions.into_iter()) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions.into_iter()); - let events = get_events(&mut attestation_service, Some(131), 10).await; + let events = get_events(&mut subnet_service, Some(130), 10).await; let mut discover_peer_count = 0; let mut enr_add_count = 0; - let mut unexpected_msg_count = 0; let mut unsubscribe_event_count = 0; + let mut subscription_event_count = 0; for event in &events { match event { SubnetServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, - SubnetServiceMessage::Subscribe(_any_subnet) => {} + 
SubnetServiceMessage::Subscribe(_any_subnet) => subscription_event_count += 1, SubnetServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, SubnetServiceMessage::Unsubscribe(_) => unsubscribe_event_count += 1, - _ => unexpected_msg_count += 1, + SubnetServiceMessage::EnrRemove(_) => {} } } - // There should be a Subscribe Event, and Enr Add event and a DiscoverPeers event for each - // long-lived subnet initially. The next event should be a bulk discovery event. - let bulk_discovery_index = 3 * subnets_per_node; + // There should be a Subscribe Event, an Enr Add event for each + // permanent subnet initially. There is a single discovery event for the permanent + // subnets. + // The next event should be a bulk discovery event. + let bulk_discovery_index = subnets_per_node * 2 + 1; // The bulk discovery request length should be equal to validator_count let bulk_discovery_event = &events[bulk_discovery_index]; if let SubnetServiceMessage::DiscoverPeers(d) = bulk_discovery_event { @@ -455,14 +423,13 @@ mod attestation_service { // 64 `DiscoverPeer` requests of length 1 corresponding to deterministic subnets // and 1 `DiscoverPeer` request corresponding to bulk subnet discovery. 
- assert_eq!(discover_peer_count, subnets_per_node + 1); - assert_eq!(attestation_service.subscription_count(), subnets_per_node); + assert_eq!(discover_peer_count, 1 + 1); + assert_eq!(subscription_event_count, attestation_subnet_count); assert_eq!(enr_add_count, subnets_per_node); assert_eq!( unsubscribe_event_count, attestation_subnet_count - subnets_per_node as u64 ); - assert_eq!(unexpected_msg_count, 0); // test completed successfully } @@ -473,30 +440,28 @@ mod attestation_service { let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // the 65th subscription should result in no more messages than the previous scenario - let subscription_count = attestation_subnet_count + 1; + let subscriptions_count = attestation_subnet_count + 1; let committee_count = 1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + let mut subnet_service = get_subnet_service(); + let current_slot = subnet_service .beacon_chain .slot_clock .now() .expect("Could not get current slot"); let subscriptions = get_subscriptions( - subscription_count, + subscriptions_count, current_slot + subscription_slot, committee_count, true, ); // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions.into_iter()) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions.into_iter()); - let events = get_events(&mut attestation_service, None, 3).await; + let events = get_events(&mut subnet_service, None, 3).await; let mut discover_peer_count = 0; let mut enr_add_count = 0; let mut unexpected_msg_count = 0; @@ -506,7 +471,10 @@ mod attestation_service { SubnetServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, SubnetServiceMessage::Subscribe(_any_subnet) => {} SubnetServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, - _ => unexpected_msg_count += 1, + _ => { + unexpected_msg_count += 1; + println!("{:?}", event); 
+ } } } @@ -520,8 +488,8 @@ mod attestation_service { // subnets_per_node `DiscoverPeer` requests of length 1 corresponding to long-lived subnets // and 1 `DiscoverPeer` request corresponding to the bulk subnet discovery. - assert_eq!(discover_peer_count, subnets_per_node + 1); - assert_eq!(attestation_service.subscription_count(), subnets_per_node); + assert_eq!(discover_peer_count, 1 + 1); // Generates a single discovery for permanent + // subscriptions and 1 for the subscription assert_eq!(enr_add_count, subnets_per_node); assert_eq!(unexpected_msg_count, 0); } @@ -531,18 +499,23 @@ mod attestation_service { async fn test_subscribe_same_subnet_several_slots_apart() { // subscription config let committee_count = 1; - let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; - // Makes 2 validator subscriptions to the same subnet but at different slots. - // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). + // Makes 3 validator subscriptions to the same subnet but at different slots. + // There should be just 1 unsubscription event for each of the later slots subscriptions + // (subscription_slot2 and subscription_slot3). 
let subscription_slot1 = 0; let subscription_slot2 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; + let subscription_slot3 = subscription_slot2 * 2; let com1 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; let com2 = 0; + let com3 = CHAIN.chain.spec.attestation_subnet_count - com1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + let mut subnet_service = get_subnet_service(); + // Remove permanent events + let _events = get_events(&mut subnet_service, None, 0).await; + + let current_slot = subnet_service .beacon_chain .slot_clock .now() @@ -562,11 +535,18 @@ mod attestation_service { true, ); + let sub3 = get_subscription( + com3, + current_slot + Slot::new(subscription_slot3), + committee_count, + true, + ); + let subnet_id1 = SubnetId::compute_subnet::( current_slot + Slot::new(subscription_slot1), com1, committee_count, - &attestation_service.beacon_chain.spec, + &subnet_service.beacon_chain.spec, ) .unwrap(); @@ -574,48 +554,48 @@ mod attestation_service { current_slot + Slot::new(subscription_slot2), com2, committee_count, - &attestation_service.beacon_chain.spec, + &subnet_service.beacon_chain.spec, + ) + .unwrap(); + + let subnet_id3 = SubnetId::compute_subnet::( + current_slot + Slot::new(subscription_slot3), + com3, + committee_count, + &subnet_service.beacon_chain.spec, ) .unwrap(); // Assert that subscriptions are different but their subnet is the same assert_ne!(sub1, sub2); + assert_ne!(sub1, sub3); + assert_ne!(sub2, sub3); assert_eq!(subnet_id1, subnet_id2); + assert_eq!(subnet_id1, subnet_id3); // submit the subscriptions - attestation_service - .validator_subscriptions(vec![sub1, sub2].into_iter()) - .unwrap(); + subnet_service.validator_subscriptions(vec![sub1, sub2, sub3].into_iter()); // Unsubscription event should happen at the end of the slot. 
- let events = get_events(&mut attestation_service, None, 1).await; - matches::assert_matches!( - events[..3], - [ - SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3), - SubnetServiceMessage::DiscoverPeers(_), - ] - ); + // We wait for 2 slots, to avoid timeout issues + let events = get_events(&mut subnet_service, None, 2).await; let expected_subscription = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); let expected_unsubscription = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); - if !attestation_service.is_subscribed( - &subnet_id1, - attestation_subnets::SubscriptionKind::LongLived, - ) { - assert_eq!(expected_subscription, events[subnets_per_node * 3]); - assert_eq!(expected_unsubscription, events[subnets_per_node * 3 + 2]); + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + assert_eq!(expected_subscription, events[0]); + assert_eq!(expected_unsubscription, events[2]); } - assert_eq!(attestation_service.subscription_count(), 2); + // Check that there are no more subscriptions + assert_eq!(subnet_service.subscriptions().count(), 0); println!("{events:?}"); let subscription_slot = current_slot + subscription_slot2 - 1; // one less do to the // advance subscription time - let wait_slots = attestation_service + let wait_slots = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_slot) @@ -623,90 +603,68 @@ mod attestation_service { .as_millis() as u64 / SLOT_DURATION_MILLIS; - let no_events = dbg!(get_events(&mut attestation_service, None, wait_slots as u32).await); + let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); assert_eq!(no_events, []); - let second_subscribe_event = get_events(&mut attestation_service, None, 2).await; - // If the long lived and short lived subnets are different, we should get an unsubscription event. 
- if !attestation_service.is_subscribed( - &subnet_id1, - attestation_subnets::SubscriptionKind::LongLived, - ) { + let second_subscribe_event = get_events(&mut subnet_service, None, 2).await; + // If the permanent and short lived subnets are different, we should get an unsubscription event. + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { assert_eq!( - [SubnetServiceMessage::Subscribe(Subnet::Attestation( - subnet_id1 - ))], + [ + expected_subscription.clone(), + expected_unsubscription.clone(), + ], second_subscribe_event[..] ); } + + let subscription_slot = current_slot + subscription_slot3 - 1; + + let wait_slots = subnet_service + .beacon_chain + .slot_clock + .duration_to_slot(subscription_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; + + let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); + + assert_eq!(no_events, []); + + let third_subscribe_event = get_events(&mut subnet_service, None, 2).await; + + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + assert_eq!( + [expected_subscription, expected_unsubscription], + third_subscribe_event[..] 
+ ); + } } #[tokio::test] - async fn test_update_deterministic_long_lived_subnets() { - let mut attestation_service = get_attestation_service(None); - let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; - - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = get_subscriptions(20, current_slot, 30, false); - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions.into_iter()) - .unwrap(); - - // There should only be the same subscriptions as there are in the specification, - // regardless of subscriptions - assert_eq!( - attestation_service.long_lived_subscriptions().len(), - subnets_per_node - ); - - let events = get_events(&mut attestation_service, None, 4).await; - - // Check that we attempt to subscribe and register ENRs - matches::assert_matches!( - events[..6], - [ - SubnetServiceMessage::Subscribe(_), - SubnetServiceMessage::EnrAdd(_), - SubnetServiceMessage::DiscoverPeers(_), - SubnetServiceMessage::Subscribe(_), - SubnetServiceMessage::EnrAdd(_), - SubnetServiceMessage::DiscoverPeers(_), - ] - ); - } -} - -mod sync_committee_service { - use super::*; - - #[tokio::test] - async fn subscribe_and_unsubscribe() { + async fn subscribe_and_unsubscribe_sync_committee() { // subscription config let validator_index = 1; let until_epoch = Epoch::new(1); let sync_committee_indices = vec![1]; // create the attestation service and subscriptions - let mut sync_committee_service = get_sync_committee_service(); + let mut subnet_service = get_subnet_service(); + let _events = get_events(&mut subnet_service, None, 0).await; - let subscriptions = vec![SyncCommitteeSubscription { - validator_index, - sync_committee_indices: sync_committee_indices.clone(), - until_epoch, - }]; + let subscriptions = + std::iter::once(Subscription::SyncCommittee(SyncCommitteeSubscription { + validator_index, + sync_committee_indices: 
sync_committee_indices.clone(), + until_epoch, + })); // submit the subscriptions - sync_committee_service - .validator_subscriptions(subscriptions) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions); + + // Remove permanent subscription events let subnet_ids = SyncSubnetId::compute_subnets_for_sync_committee::( &sync_committee_indices, @@ -716,7 +674,7 @@ mod sync_committee_service { // Note: the unsubscription event takes 2 epochs (8 * 2 * 0.4 secs = 3.2 secs) let events = get_events( - &mut sync_committee_service, + &mut subnet_service, Some(5), (MainnetEthSpec::slots_per_epoch() * 3) as u32, // Have some buffer time before getting 5 events ) @@ -738,7 +696,7 @@ mod sync_committee_service { ); // Should be unsubscribed at the end. - assert_eq!(sync_committee_service.subscription_count(), 0); + assert_eq!(subnet_service.subscriptions().count(), 0); } #[tokio::test] @@ -749,21 +707,22 @@ mod sync_committee_service { let sync_committee_indices = vec![1]; // create the attestation service and subscriptions - let mut sync_committee_service = get_sync_committee_service(); + let mut subnet_service = get_subnet_service(); + // Get the initial events from permanent subnet subscriptions + let _events = get_events(&mut subnet_service, None, 1).await; - let subscriptions = vec![SyncCommitteeSubscription { - validator_index, - sync_committee_indices: sync_committee_indices.clone(), - until_epoch, - }]; + let subscriptions = + std::iter::once(Subscription::SyncCommittee(SyncCommitteeSubscription { + validator_index, + sync_committee_indices: sync_committee_indices.clone(), + until_epoch, + })); // submit the subscriptions - sync_committee_service - .validator_subscriptions(subscriptions) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions); // Get all immediate events (won't include unsubscriptions) - let events = get_events(&mut sync_committee_service, None, 1).await; + let events = get_events(&mut subnet_service, None, 1).await; 
matches::assert_matches!( events[..], [ @@ -777,28 +736,30 @@ mod sync_committee_service { // Event 1 is a duplicate of an existing subscription // Event 2 is the same subscription with lower `until_epoch` than the existing subscription let subscriptions = vec![ - SyncCommitteeSubscription { + Subscription::SyncCommittee(SyncCommitteeSubscription { validator_index, sync_committee_indices: sync_committee_indices.clone(), until_epoch, - }, - SyncCommitteeSubscription { + }), + Subscription::SyncCommittee(SyncCommitteeSubscription { validator_index, sync_committee_indices: sync_committee_indices.clone(), until_epoch: until_epoch - 1, - }, + }), ]; // submit the subscriptions - sync_committee_service - .validator_subscriptions(subscriptions) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions.into_iter()); // Get all immediate events (won't include unsubscriptions) - let events = get_events(&mut sync_committee_service, None, 1).await; + let events = get_events(&mut subnet_service, None, 1).await; matches::assert_matches!(events[..], [SubnetServiceMessage::DiscoverPeers(_),]); // Should be unsubscribed at the end. - assert_eq!(sync_committee_service.subscription_count(), 1); + let sync_committee_subscriptions = subnet_service + .subscriptions() + .filter(|s| matches!(s, Subnet::SyncCommittee(_))) + .count(); + assert_eq!(sync_committee_subscriptions, 1); } } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 946d25237b..5703ed3504 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -158,26 +158,20 @@ impl BackFillSync { log: slog::Logger, ) -> Self { // Determine if backfill is enabled or not. - // Get the anchor info, if this returns None, then backfill is not required for this - // running instance. 
// If, for some reason a backfill has already been completed (or we've used a trusted // genesis root) then backfill has been completed. - - let (state, current_start) = match beacon_chain.store.get_anchor_info() { - Some(anchor_info) => { - if anchor_info.block_backfill_complete(beacon_chain.genesis_backfill_slot) { - (BackFillState::Completed, Epoch::new(0)) - } else { - ( - BackFillState::Paused, - anchor_info - .oldest_block_slot - .epoch(T::EthSpec::slots_per_epoch()), - ) - } - } - None => (BackFillState::NotRequired, Epoch::new(0)), - }; + let anchor_info = beacon_chain.store.get_anchor_info(); + let (state, current_start) = + if anchor_info.block_backfill_complete(beacon_chain.genesis_backfill_slot) { + (BackFillState::Completed, Epoch::new(0)) + } else { + ( + BackFillState::Paused, + anchor_info + .oldest_block_slot + .epoch(T::EthSpec::slots_per_epoch()), + ) + }; let bfs = BackFillSync { batches: BTreeMap::new(), @@ -253,25 +247,15 @@ impl BackFillSync { self.set_state(BackFillState::Syncing); // Obtain a new start slot, from the beacon chain and handle possible errors. - match self.reset_start_epoch() { - Err(ResetEpochError::SyncCompleted) => { - error!(self.log, "Backfill sync completed whilst in failed status"); - self.set_state(BackFillState::Completed); - return Err(BackFillError::InvalidSyncState(String::from( - "chain completed", - ))); - } - Err(ResetEpochError::NotRequired) => { - error!( - self.log, - "Backfill sync not required whilst in failed status" - ); - self.set_state(BackFillState::NotRequired); - return Err(BackFillError::InvalidSyncState(String::from( - "backfill not required", - ))); - } - Ok(_) => {} + if let Err(e) = self.reset_start_epoch() { + // This infallible match exists to force us to update this code if a future + // refactor of `ResetEpochError` adds a variant. 
+ let ResetEpochError::SyncCompleted = e; + error!(self.log, "Backfill sync completed whilst in failed status"); + self.set_state(BackFillState::Completed); + return Err(BackFillError::InvalidSyncState(String::from( + "chain completed", + ))); } debug!(self.log, "Resuming a failed backfill sync"; "start_epoch" => self.current_start); @@ -279,9 +263,7 @@ impl BackFillSync { // begin requesting blocks from the peer pool, until all peers are exhausted. self.request_batches(network)?; } - BackFillState::Completed | BackFillState::NotRequired => { - return Ok(SyncStart::NotSyncing) - } + BackFillState::Completed => return Ok(SyncStart::NotSyncing), } Ok(SyncStart::Syncing { @@ -313,10 +295,7 @@ impl BackFillSync { peer_id: &PeerId, network: &mut SyncNetworkContext, ) -> Result<(), BackFillError> { - if matches!( - self.state(), - BackFillState::Failed | BackFillState::NotRequired - ) { + if matches!(self.state(), BackFillState::Failed) { return Ok(()); } @@ -1142,17 +1121,14 @@ impl BackFillSync { /// This errors if the beacon chain indicates that backfill sync has already completed or is /// not required. 
fn reset_start_epoch(&mut self) -> Result<(), ResetEpochError> { - if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { - if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { - Err(ResetEpochError::SyncCompleted) - } else { - self.current_start = anchor_info - .oldest_block_slot - .epoch(T::EthSpec::slots_per_epoch()); - Ok(()) - } + let anchor_info = self.beacon_chain.store.get_anchor_info(); + if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { + Err(ResetEpochError::SyncCompleted) } else { - Err(ResetEpochError::NotRequired) + self.current_start = anchor_info + .oldest_block_slot + .epoch(T::EthSpec::slots_per_epoch()); + Ok(()) } } @@ -1160,13 +1136,12 @@ impl BackFillSync { fn check_completed(&mut self) -> bool { if self.would_complete(self.current_start) { // Check that the beacon chain agrees - if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { - // Conditions that we have completed a backfill sync - if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { - return true; - } else { - error!(self.log, "Backfill out of sync with beacon chain"); - } + let anchor_info = self.beacon_chain.store.get_anchor_info(); + // Conditions that we have completed a backfill sync + if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { + return true; + } else { + error!(self.log, "Backfill out of sync with beacon chain"); } } false @@ -1195,6 +1170,4 @@ impl BackFillSync { enum ResetEpochError { /// The chain has already completed. SyncCompleted, - /// Backfill is not required. 
- NotRequired, } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index f5e68d1512..5a11bca481 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -50,8 +50,6 @@ use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; pub mod common; pub mod parent_chain; mod single_block_lookup; -#[cfg(test)] -mod tests; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index d701cbbb8d..9bbd2bf295 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -171,7 +171,10 @@ impl SingleBlockLookup { self.awaiting_parent.is_some() || self.block_request_state.state.is_awaiting_event() || match &self.component_requests { - ComponentRequests::WaitingForBlock => true, + // If components are waiting for the block request to complete, here we should + // check if the`block_request_state.state.is_awaiting_event(). However we already + // checked that above, so `WaitingForBlock => false` is equivalent. 
+ ComponentRequests::WaitingForBlock => false, ComponentRequests::ActiveBlobRequest(request, _) => { request.state.is_awaiting_event() } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index cefededf34..2df8b5f94c 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -362,6 +362,16 @@ impl SyncManager { self.sampling.get_request_status(block_root, index) } + #[cfg(test)] + pub(crate) fn range_sync_state(&self) -> super::range_sync::SyncChainStatus { + self.range_sync.state() + } + + #[cfg(test)] + pub(crate) fn update_execution_engine_state(&mut self, state: EngineState) { + self.handle_new_execution_engine_state(state); + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } @@ -1188,22 +1198,14 @@ impl SyncManager { } fn on_sampling_result(&mut self, requester: SamplingRequester, result: SamplingResult) { - // TODO(das): How is a consumer of sampling results? - // - Fork-choice for trailing DA - // - Single lookups to complete import requirements - // - Range sync to complete import requirements? Can sampling for syncing lag behind and - // accumulate in fork-choice? - match requester { SamplingRequester::ImportedBlock(block_root) => { debug!(self.log, "Sampling result"; "block_root" => %block_root, "result" => ?result); - // TODO(das): Consider moving SamplingResult to the beacon_chain crate and import - // here. No need to add too much enum variants, just whatever the beacon_chain or - // fork-choice needs to make a decision. Currently the fork-choice only needs to - // be notified of successful samplings, i.e. sampling failures don't trigger pruning match result { Ok(_) => { + // Notify the fork-choice of a successful sampling result to mark the block + // branch as safe. 
if let Err(e) = self .network .beacon_processor() diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 1dca6f02ac..0f5fd6fb9f 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -9,6 +9,8 @@ mod network_context; mod peer_sampling; mod peer_sync_info; mod range_sync; +#[cfg(test)] +mod tests; pub use lighthouse_network::service::api_types::SamplingId; pub use manager::{BatchProcessResult, SyncMessage}; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 12c4009707..ed413cb459 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -775,13 +775,11 @@ impl SyncNetworkContext { let requester = CustodyRequester(id); let mut request = ActiveCustodyRequest::new( block_root, - // TODO(das): req_id is duplicated here, also present in id - CustodyId { requester, req_id }, + CustodyId { requester }, &custody_indexes_to_fetch, self.log.clone(), ); - // TODO(das): start request // Note that you can only send, but not handle a response here match request.continue_requests(self) { Ok(_) => { @@ -791,7 +789,6 @@ impl SyncNetworkContext { self.custody_by_root_requests.insert(requester, request); Ok(LookupRequestResult::RequestSent(req_id)) } - // TODO(das): handle this error properly Err(e) => Err(RpcRequestSendError::CustodyRequestError(e)), } } diff --git a/beacon_node/network/src/sync/peer_sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs index 7e725f5df5..289ed73cdd 100644 --- a/beacon_node/network/src/sync/peer_sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -24,7 +24,6 @@ pub type SamplingResult = Result<(), SamplingError>; type DataColumnSidecarList = Vec>>; pub struct Sampling { - // TODO(das): stalled sampling request are never cleaned up requests: HashMap>, sampling_config: SamplingConfig, log: slog::Logger, @@ -313,8 +312,8 @@ impl 
ActiveSamplingRequest { .iter() .position(|data| &data.index == column_index) else { - // Peer does not have the requested data. - // TODO(das) what to do? + // Peer does not have the requested data, mark peer as "dont have" and try + // again with a different peer. debug!(self.log, "Sampling peer claims to not have the data"; "block_root" => %self.block_root, @@ -373,7 +372,9 @@ impl ActiveSamplingRequest { sampling_request_id, }, ) { - // TODO(das): Beacon processor is overloaded, what should we do? + // Beacon processor is overloaded, drop sampling attempt. Failing to sample + // is not a permanent state so we should recover once the node has capacity + // and receives a descendant block. error!(self.log, "Dropping sampling"; "block" => %self.block_root, @@ -391,8 +392,8 @@ impl ActiveSamplingRequest { ); metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::FAILURE]); - // Error downloading, maybe penalize peer and retry again. - // TODO(das) with different peer or different peer? + // Error downloading, malicious network errors are already penalized before + // reaching this function. Mark the peer as failed and try again with another. 
for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { warn!(self.log, @@ -453,7 +454,7 @@ impl ActiveSamplingRequest { debug!(self.log, "Sample verification failure"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "reason" => ?err); metrics::inc_counter_vec(&metrics::SAMPLE_VERIFY_RESULT, &[metrics::FAILURE]); - // TODO(das): Peer sent invalid data, penalize and try again from different peer + // Peer sent invalid data, penalize and try again from different peer // TODO(das): Count individual failures for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { diff --git a/beacon_node/network/src/sync/range_sync/block_storage.rs b/beacon_node/network/src/sync/range_sync/block_storage.rs deleted file mode 100644 index df49543a6b..0000000000 --- a/beacon_node/network/src/sync/range_sync/block_storage.rs +++ /dev/null @@ -1,13 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::Hash256; - -/// Trait that helps maintain RangeSync's implementation split from the BeaconChain -pub trait BlockStorage { - fn is_block_known(&self, block_root: &Hash256) -> bool; -} - -impl BlockStorage for BeaconChain { - fn is_block_known(&self, block_root: &Hash256) -> bool { - self.block_is_known_to_fork_choice(block_root) - } -} diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 732e4a7bd1..51d9d9da37 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -8,9 +8,9 @@ use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, Ba use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; -use lighthouse_metrics::set_int_gauge; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; +use metrics::set_int_gauge; 
use rand::seq::SliceRandom; use rand::Rng; use slog::{crit, debug, o, warn}; diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 1217fbf8fe..c030d0a19e 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -3,12 +3,11 @@ //! Each chain type is stored in it's own map. A variety of helper functions are given along with //! this struct to simplify the logic of the other layers of sync. -use super::block_storage::BlockStorage; use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::sync::network_context::SyncNetworkContext; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; @@ -37,10 +36,13 @@ pub enum RangeSyncState { Idle, } +pub type SyncChainStatus = + Result, &'static str>; + /// A collection of finalized and head chains currently being processed. -pub struct ChainCollection { +pub struct ChainCollection { /// The beacon chain for processing. - beacon_chain: Arc, + beacon_chain: Arc>, /// The set of finalized chains being synced. finalized_chains: FnvHashMap>, /// The set of head chains being synced. 
@@ -51,8 +53,8 @@ pub struct ChainCollection { log: slog::Logger, } -impl ChainCollection { - pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { +impl ChainCollection { + pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { ChainCollection { beacon_chain, finalized_chains: FnvHashMap::default(), @@ -213,9 +215,7 @@ impl ChainCollection { } } - pub fn state( - &self, - ) -> Result, &'static str> { + pub fn state(&self) -> SyncChainStatus { match self.state { RangeSyncState::Finalized(ref syncing_id) => { let chain = self @@ -409,7 +409,8 @@ impl ChainCollection { let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { - target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root) + target_slot <= &local_finalized_slot + || beacon_chain.block_is_known_to_fork_choice(target_root) }; // Retain only head peers that remain relevant diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index d0f2f9217e..8f881fba90 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -2,7 +2,6 @@ //! peers. mod batch; -mod block_storage; mod chain; mod chain_collection; mod range; @@ -13,5 +12,7 @@ pub use batch::{ ByRangeRequestType, }; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; +#[cfg(test)] +pub use chain_collection::SyncChainStatus; pub use range::RangeSync; pub use sync_type::RangeSyncType; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 0ef99838de..78679403bb 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -39,9 +39,8 @@ //! Each chain is downloaded in batches of blocks. The batched blocks are processed sequentially //! and further batches are requested as current blocks are being processed. 
-use super::block_storage::BlockStorage; use super::chain::{BatchId, ChainId, RemoveChain, SyncingChain}; -use super::chain_collection::ChainCollection; +use super::chain_collection::{ChainCollection, SyncChainStatus}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::status::ToStatusMessage; @@ -56,7 +55,7 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::{Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256}; /// For how long we store failed finalized chains to prevent retries. const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; @@ -64,27 +63,26 @@ const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; /// The primary object dealing with long range/batch syncing. This contains all the active and /// non-active chains that need to be processed before the syncing is considered complete. This /// holds the current state of the long range sync. -pub struct RangeSync> { +pub struct RangeSync { /// The beacon chain for processing. - beacon_chain: Arc, + beacon_chain: Arc>, /// Last known sync info of our useful connected peers. We use this information to create Head /// chains after all finalized chains have ended. awaiting_head_peers: HashMap, /// A collection of chains that need to be downloaded. This stores any head or finalized chains /// that need to be downloaded. - chains: ChainCollection, + chains: ChainCollection, /// Chains that have failed and are stored to prevent being retried. failed_chains: LRUTimeCache, /// The syncing logger. 
log: slog::Logger, } -impl RangeSync +impl RangeSync where - C: BlockStorage + ToStatusMessage, T: BeaconChainTypes, { - pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { + pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), chains: ChainCollection::new(beacon_chain, log.clone()), @@ -96,9 +94,7 @@ where } } - pub fn state( - &self, - ) -> Result, &'static str> { + pub fn state(&self) -> SyncChainStatus { self.chains.state() } @@ -382,465 +378,3 @@ where } } } - -#[cfg(test)] -mod tests { - use crate::network_beacon_processor::NetworkBeaconProcessor; - use crate::sync::SyncMessage; - use crate::NetworkMessage; - - use super::*; - use crate::sync::network_context::{BlockOrBlob, RangeRequestId}; - use beacon_chain::builder::Witness; - use beacon_chain::eth1_chain::CachingEth1Backend; - use beacon_chain::parking_lot::RwLock; - use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use beacon_chain::EngineState; - use beacon_processor::WorkEvent as BeaconWorkEvent; - use lighthouse_network::service::api_types::SyncRequestId; - use lighthouse_network::{ - rpc::StatusMessage, service::api_types::AppRequestId, NetworkConfig, NetworkGlobals, - }; - use slog::{o, Drain}; - use slot_clock::TestingSlotClock; - use std::collections::HashSet; - use store::MemoryStore; - use tokio::sync::mpsc; - use types::{FixedBytesExtended, ForkName, MinimalEthSpec as E}; - - #[derive(Debug)] - struct FakeStorage { - known_blocks: RwLock>, - status: RwLock, - } - - impl Default for FakeStorage { - fn default() -> Self { - FakeStorage { - known_blocks: RwLock::new(HashSet::new()), - status: RwLock::new(StatusMessage { - fork_digest: [0; 4], - finalized_root: Hash256::zero(), - finalized_epoch: 0usize.into(), - head_root: Hash256::zero(), - head_slot: 0usize.into(), - }), - } - } - } - - impl FakeStorage { - fn remember_block(&self, block_root: Hash256) { - self.known_blocks.write().insert(block_root); - } - - 
#[allow(dead_code)] - fn forget_block(&self, block_root: &Hash256) { - self.known_blocks.write().remove(block_root); - } - } - - impl BlockStorage for FakeStorage { - fn is_block_known(&self, block_root: &store::Hash256) -> bool { - self.known_blocks.read().contains(block_root) - } - } - - impl ToStatusMessage for FakeStorage { - fn status_message(&self) -> StatusMessage { - self.status.read().clone() - } - } - - type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; - - fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - if enabled { - slog::Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), o!()) - } - } - - #[allow(unused)] - struct TestRig { - log: slog::Logger, - /// To check what does sync send to the beacon processor. - beacon_processor_rx: mpsc::Receiver>, - /// To set up different scenarios where sync is told about known/unknown blocks. - chain: Arc, - /// Needed by range to handle communication with the network. - cx: SyncNetworkContext, - /// To check what the network receives from Range. - network_rx: mpsc::UnboundedReceiver>, - /// To modify what the network declares about various global variables, in particular about - /// the sync state of a peer. - globals: Arc>, - } - - impl RangeSync { - fn assert_state(&self, expected_state: RangeSyncType) { - assert_eq!( - self.state() - .expect("State is ok") - .expect("Range is syncing") - .0, - expected_state - ) - } - - #[allow(dead_code)] - fn assert_not_syncing(&self) { - assert!( - self.state().expect("State is ok").is_none(), - "Range should not be syncing." 
- ); - } - } - - impl TestRig { - fn local_info(&self) -> SyncInfo { - let StatusMessage { - fork_digest: _, - finalized_root, - finalized_epoch, - head_root, - head_slot, - } = self.chain.status.read().clone(); - SyncInfo { - head_slot, - head_root, - finalized_epoch, - finalized_root, - } - } - - /// Reads an BlocksByRange request to a given peer from the network receiver channel. - #[track_caller] - fn grab_request( - &mut self, - expected_peer: &PeerId, - fork_name: ForkName, - ) -> (AppRequestId, Option) { - let block_req_id = if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - request_id - } else { - panic!("Should have sent a batch request to the peer") - }; - let blob_req_id = if fork_name.deneb_enabled() { - if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - Some(request_id) - } else { - panic!("Should have sent a batch request to the peer") - } - } else { - None - }; - (block_req_id, blob_req_id) - } - - fn complete_range_block_and_blobs_response( - &mut self, - block_req: AppRequestId, - blob_req_opt: Option, - ) -> (ChainId, BatchId, Id) { - if blob_req_opt.is_some() { - match block_req { - AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { - let _ = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Block(None)); - let response = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Blob(None)) - .unwrap(); - let (chain_id, batch_id) = - TestRig::unwrap_range_request_id(response.sender_id); - (chain_id, batch_id, id) - } - other => panic!("unexpected request {:?}", other), - } - } else { - match block_req { - AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { - let response = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Block(None)) - .unwrap(); - let (chain_id, batch_id) = - 
TestRig::unwrap_range_request_id(response.sender_id); - (chain_id, batch_id, id) - } - other => panic!("unexpected request {:?}", other), - } - } - } - - fn unwrap_range_request_id(sender_id: RangeRequestId) -> (ChainId, BatchId) { - if let RangeRequestId::RangeSync { chain_id, batch_id } = sender_id { - (chain_id, batch_id) - } else { - panic!("expected RangeSync request: {:?}", sender_id) - } - } - - /// Produce a head peer - fn head_peer( - &self, - ) -> ( - PeerId, - SyncInfo, /* Local info */ - SyncInfo, /* Remote info */ - ) { - let local_info = self.local_info(); - - // Get a peer with an advanced head - let head_root = Hash256::random(); - let head_slot = local_info.head_slot + 1; - let remote_info = SyncInfo { - head_root, - head_slot, - ..local_info - }; - let peer_id = PeerId::random(); - (peer_id, local_info, remote_info) - } - - fn finalized_peer( - &self, - ) -> ( - PeerId, - SyncInfo, /* Local info */ - SyncInfo, /* Remote info */ - ) { - let local_info = self.local_info(); - - let finalized_root = Hash256::random(); - let finalized_epoch = local_info.finalized_epoch + 2; - let head_slot = finalized_epoch.start_slot(E::slots_per_epoch()); - let head_root = Hash256::random(); - let remote_info = SyncInfo { - finalized_epoch, - finalized_root, - head_slot, - head_root, - }; - - let peer_id = PeerId::random(); - (peer_id, local_info, remote_info) - } - - #[track_caller] - fn expect_empty_processor(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - panic!( - "Expected empty processor. 
Instead got {}", - work.work_type_str() - ); - } - Err(e) => match e { - mpsc::error::TryRecvError::Empty => {} - mpsc::error::TryRecvError::Disconnected => unreachable!("bad coded test?"), - }, - } - } - - #[track_caller] - fn expect_chain_segment(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::WorkType::ChainSegment); - } - other => panic!("Expected chain segment process, found {:?}", other), - } - } - } - - fn range(log_enabled: bool) -> (TestRig, RangeSync) { - let log = build_log(slog::Level::Trace, log_enabled); - // Initialise a new beacon chain - let harness = BeaconChainHarness::>::builder(E) - .default_spec() - .logger(log.clone()) - .deterministic_keypairs(1) - .fresh_ephemeral_store() - .build(); - let chain = harness.chain; - - let fake_store = Arc::new(FakeStorage::default()); - let range_sync = RangeSync::::new( - fake_store.clone(), - log.new(o!("component" => "range")), - ); - let (network_tx, network_rx) = mpsc::unbounded_channel(); - let (sync_tx, _sync_rx) = mpsc::unbounded_channel::>(); - let network_config = Arc::new(NetworkConfig::default()); - let globals = Arc::new(NetworkGlobals::new_test_globals( - Vec::new(), - &log, - network_config, - chain.spec.clone(), - )); - let (network_beacon_processor, beacon_processor_rx) = - NetworkBeaconProcessor::null_for_testing( - globals.clone(), - sync_tx, - chain.clone(), - harness.runtime.task_executor.clone(), - log.clone(), - ); - let cx = SyncNetworkContext::new( - network_tx, - Arc::new(network_beacon_processor), - chain, - log.new(o!("component" => "network_context")), - ); - let test_rig = TestRig { - log, - beacon_processor_rx, - chain: fake_store, - cx, - network_rx, - globals, - }; - (test_rig, range_sync) - } - - #[test] - fn head_chain_removed_while_finalized_syncing() { - // NOTE: this is a regression test. 
- let (mut rig, mut range) = range(false); - - // Get a peer with an advanced head - let (head_peer, local_info, remote_info) = rig.head_peer(); - range.add_peer(&mut rig.cx, local_info, head_peer, remote_info); - range.assert_state(RangeSyncType::Head); - - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // Sync should have requested a batch, grab the request. - let _ = rig.grab_request(&head_peer, fork); - - // Now get a peer with an advanced finalized epoch. - let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - range.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.grab_request(&finalized_peer, fork); - - // Fail the head chain by disconnecting the peer. - range.remove_peer(&mut rig.cx, &head_peer); - range.assert_state(RangeSyncType::Finalized); - } - - #[test] - fn state_update_while_purging() { - // NOTE: this is a regression test. - let (mut rig, mut range) = range(true); - - // Get a peer with an advanced head - let (head_peer, local_info, head_info) = rig.head_peer(); - let head_peer_root = head_info.head_root; - range.add_peer(&mut rig.cx, local_info, head_peer, head_info); - range.assert_state(RangeSyncType::Head); - - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // Sync should have requested a batch, grab the request. - let _ = rig.grab_request(&head_peer, fork); - - // Now get a peer with an advanced finalized epoch. 
- let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - let finalized_peer_root = remote_info.finalized_root; - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - range.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.grab_request(&finalized_peer, fork); - - // Now the chain knows both chains target roots. - rig.chain.remember_block(head_peer_root); - rig.chain.remember_block(finalized_peer_root); - - // Add an additional peer to the second chain to make range update it's status - let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - } - - #[test] - fn pause_and_resume_on_ee_offline() { - let (mut rig, mut range) = range(true); - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // add some peers - let (peer1, local_info, head_info) = rig.head_peer(); - range.add_peer(&mut rig.cx, local_info, peer1, head_info); - let (block_req, blob_req_opt) = rig.grab_request(&peer1, fork); - - let (chain1, batch1, id1) = - rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); - - // make the ee offline - rig.cx.update_execution_engine_state(EngineState::Offline); - - // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, vec![]); - - // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); - - // while the ee is offline, more peers might arrive. Add a new finalized peer. 
- let (peer2, local_info, finalized_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); - let (block_req, blob_req_opt) = rig.grab_request(&peer2, fork); - - let (chain2, batch2, id2) = - rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); - - // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, vec![]); - - // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); - - // make the beacon processor available again. - rig.cx.update_execution_engine_state(EngineState::Online); - - // now resume range, we should have two processing requests in the beacon processor. - range.resume(&mut rig.cx); - - rig.expect_chain_segment(); - rig.expect_chain_segment(); - } -} diff --git a/beacon_node/network/src/sync/range_sync/sync_type.rs b/beacon_node/network/src/sync/range_sync/sync_type.rs index d6ffd4a5df..4ff7e39310 100644 --- a/beacon_node/network/src/sync/range_sync/sync_type.rs +++ b/beacon_node/network/src/sync/range_sync/sync_type.rs @@ -1,10 +1,9 @@ //! Contains logic about identifying which Sync to perform given PeerSyncInfo of ourselves and //! of a remote. +use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::SyncInfo; -use super::block_storage::BlockStorage; - /// The type of Range sync that should be done relative to our current state. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum RangeSyncType { @@ -17,8 +16,8 @@ pub enum RangeSyncType { impl RangeSyncType { /// Determines the type of sync given our local `PeerSyncInfo` and the remote's /// `PeerSyncInfo`. - pub fn new( - chain: &C, + pub fn new( + chain: &BeaconChain, local_info: &SyncInfo, remote_info: &SyncInfo, ) -> RangeSyncType { @@ -29,7 +28,7 @@ impl RangeSyncType { // not seen the finalized hash before. 
if remote_info.finalized_epoch > local_info.finalized_epoch - && !chain.is_block_known(&remote_info.finalized_root) + && !chain.block_is_known_to_fork_choice(&remote_info.finalized_root) { RangeSyncType::Finalized } else { diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/tests/lookups.rs similarity index 95% rename from beacon_node/network/src/sync/block_lookups/tests.rs rename to beacon_node/network/src/sync/tests/lookups.rs index e460287afb..8c88ddb62d 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1,98 +1,50 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; -use crate::sync::manager::{BlockProcessType, SyncManager}; -use crate::sync::peer_sampling::SamplingConfig; -use crate::sync::range_sync::RangeSyncType; -use crate::sync::{SamplingId, SyncMessage}; +use crate::sync::block_lookups::{ + BlockLookupSummary, PARENT_DEPTH_TOLERANCE, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, +}; +use crate::sync::{ + manager::{BlockProcessType, BlockProcessingResult, SyncManager}, + peer_sampling::SamplingConfig, + SamplingId, SyncMessage, +}; use crate::NetworkMessage; use std::sync::Arc; +use std::time::Duration; use super::*; use crate::sync::block_lookups::common::ResponseType; -use beacon_chain::blob_verification::GossipVerifiedBlob; -use beacon_chain::block_verification_types::BlockImportData; -use beacon_chain::builder::Witness; -use beacon_chain::data_availability_checker::Availability; -use beacon_chain::eth1_chain::CachingEth1Backend; -use beacon_chain::test_utils::{ - build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, - BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, -}; -use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ - AvailabilityPendingExecutedBlock, PayloadVerificationOutcome, PayloadVerificationStatus, + blob_verification::GossipVerifiedBlob, + 
block_verification_types::{AsBlock, BlockImportData}, + data_availability_checker::Availability, + test_utils::{ + build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, + BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, + }, + validator_monitor::timestamp_now, + AvailabilityPendingExecutedBlock, AvailabilityProcessingStatus, BlockError, + PayloadVerificationOutcome, PayloadVerificationStatus, }; use beacon_processor::WorkEvent; -use lighthouse_network::rpc::{RPCError, RequestType, RpcErrorResponse}; -use lighthouse_network::service::api_types::{ - AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingRequester, - SingleLookupReqId, SyncRequestId, +use lighthouse_network::{ + rpc::{RPCError, RequestType, RpcErrorResponse}, + service::api_types::{ + AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, + SamplingRequester, SingleLookupReqId, SyncRequestId, + }, + types::SyncState, + NetworkConfig, NetworkGlobals, PeerId, }; -use lighthouse_network::types::SyncState; -use lighthouse_network::NetworkConfig; -use lighthouse_network::NetworkGlobals; use slog::info; -use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; -use store::MemoryStore; +use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; -use types::data_column_sidecar::ColumnIndex; -use types::test_utils::TestRandom; use types::{ - test_utils::{SeedableRng, XorShiftRng}, - BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, Slot, + data_column_sidecar::ColumnIndex, + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ForkName, + Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, }; -use types::{BeaconState, BeaconStateBase, ChainSpec}; -use types::{DataColumnSidecar, Epoch}; - -type T = Witness, E, MemoryStore, MemoryStore>; - -/// This test utility enables integration testing of Lighthouse 
sync components. -/// -/// It covers the following: -/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours. -/// 2. Making assertions on `WorkEvent`s received from sync -/// 3. Making assertion on `NetworkMessage` received from sync (Outgoing RPC requests). -/// -/// The test utility covers testing the interactions from and to `SyncManager`. In diagram form: -/// +-----------------+ -/// | BeaconProcessor | -/// +---------+-------+ -/// ^ | -/// | | -/// WorkEvent | | SyncMsg -/// | | (Result) -/// | v -/// +--------+ +-----+-----------+ +----------------+ -/// | Router +----------->| SyncManager +------------>| NetworkService | -/// +--------+ SyncMsg +-----------------+ NetworkMsg +----------------+ -/// (RPC resp) | - RangeSync | (RPC req) -/// +-----------------+ -/// | - BackFillSync | -/// +-----------------+ -/// | - BlockLookups | -/// +-----------------+ -struct TestRig { - /// Receiver for `BeaconProcessor` events (e.g. block processing results). - beacon_processor_rx: mpsc::Receiver>, - beacon_processor_rx_queue: Vec>, - /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) - network_rx: mpsc::UnboundedReceiver>, - /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) - network_rx_queue: Vec>, - /// Receiver for `SyncMessage` from the network - sync_rx: mpsc::UnboundedReceiver>, - /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. - sync_manager: SyncManager, - /// To manipulate sync state and peer connection status - network_globals: Arc>, - /// Beacon chain harness - harness: BeaconChainHarness>, - /// `rng` for generating test blocks and blobs. 
- rng: XorShiftRng, - fork_name: ForkName, - spec: ChainSpec, - log: Logger, -} const D: Duration = Duration::new(0, 0); const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; @@ -131,6 +83,7 @@ impl TestRig { .logger(log.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() + .mock_execution_layer() .testing_slot_clock(TestingSlotClock::new( Slot::new(0), Duration::from_secs(0), @@ -195,7 +148,7 @@ impl TestRig { } } - fn test_setup() -> Self { + pub fn test_setup() -> Self { Self::test_setup_with_config(None) } @@ -219,11 +172,11 @@ impl TestRig { } } - fn log(&self, msg: &str) { + pub fn log(&self, msg: &str) { info!(self.log, "TEST_RIG"; "msg" => msg); } - fn after_deneb(&self) -> bool { + pub fn after_deneb(&self) -> bool { matches!(self.fork_name, ForkName::Deneb | ForkName::Electra) } @@ -289,7 +242,7 @@ impl TestRig { (parent, block, parent_root, block_root) } - fn send_sync_message(&mut self, sync_message: SyncMessage) { + pub fn send_sync_message(&mut self, sync_message: SyncMessage) { self.sync_manager.handle_message(sync_message); } @@ -420,7 +373,7 @@ impl TestRig { self.expect_empty_network(); } - fn new_connected_peer(&mut self) -> PeerId { + pub fn new_connected_peer(&mut self) -> PeerId { self.network_globals .peers .write() @@ -862,7 +815,7 @@ impl TestRig { } } - fn peer_disconnected(&mut self, peer_id: PeerId) { + pub fn peer_disconnected(&mut self, peer_id: PeerId) { self.send_sync_message(SyncMessage::Disconnect(peer_id)); } @@ -878,7 +831,7 @@ impl TestRig { } } - fn pop_received_network_event) -> Option>( + pub fn pop_received_network_event) -> Option>( &mut self, predicate_transform: F, ) -> Result { @@ -898,7 +851,7 @@ impl TestRig { } } - fn pop_received_processor_event) -> Option>( + pub fn pop_received_processor_event) -> Option>( &mut self, predicate_transform: F, ) -> Result { @@ -922,6 +875,16 @@ impl TestRig { } } + pub fn expect_empty_processor(&mut self) { + self.drain_processor_rx(); + if 
!self.beacon_processor_rx_queue.is_empty() { + panic!( + "Expected processor to be empty, but has events: {:?}", + self.beacon_processor_rx_queue + ); + } + } + fn find_block_lookup_request( &mut self, for_block: Hash256, @@ -2224,7 +2187,8 @@ fn custody_lookup_happy_path() { mod deneb_only { use super::*; use beacon_chain::{ - block_verification_types::RpcBlock, data_availability_checker::AvailabilityCheckError, + block_verification_types::{AsBlock, RpcBlock}, + data_availability_checker::AvailabilityCheckError, }; use std::collections::VecDeque; use types::RuntimeVariableList; diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs new file mode 100644 index 0000000000..6ed5c7f8fa --- /dev/null +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -0,0 +1,68 @@ +use crate::sync::manager::SyncManager; +use crate::sync::range_sync::RangeSyncType; +use crate::sync::SyncMessage; +use crate::NetworkMessage; +use beacon_chain::builder::Witness; +use beacon_chain::eth1_chain::CachingEth1Backend; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use beacon_processor::WorkEvent; +use lighthouse_network::NetworkGlobals; +use slog::Logger; +use slot_clock::ManualSlotClock; +use std::sync::Arc; +use store::MemoryStore; +use tokio::sync::mpsc; +use types::{test_utils::XorShiftRng, ChainSpec, ForkName, MinimalEthSpec as E}; + +mod lookups; +mod range; + +type T = Witness, E, MemoryStore, MemoryStore>; + +/// This test utility enables integration testing of Lighthouse sync components. +/// +/// It covers the following: +/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours. +/// 2. Making assertions on `WorkEvent`s received from sync +/// 3. Making assertion on `NetworkMessage` received from sync (Outgoing RPC requests). +/// +/// The test utility covers testing the interactions from and to `SyncManager`. 
In diagram form: +/// +-----------------+ +/// | BeaconProcessor | +/// +---------+-------+ +/// ^ | +/// | | +/// WorkEvent | | SyncMsg +/// | | (Result) +/// | v +/// +--------+ +-----+-----------+ +----------------+ +/// | Router +----------->| SyncManager +------------>| NetworkService | +/// +--------+ SyncMsg +-----------------+ NetworkMsg +----------------+ +/// (RPC resp) | - RangeSync | (RPC req) +/// +-----------------+ +/// | - BackFillSync | +/// +-----------------+ +/// | - BlockLookups | +/// +-----------------+ +struct TestRig { + /// Receiver for `BeaconProcessor` events (e.g. block processing results). + beacon_processor_rx: mpsc::Receiver>, + beacon_processor_rx_queue: Vec>, + /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) + network_rx: mpsc::UnboundedReceiver>, + /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) + network_rx_queue: Vec>, + /// Receiver for `SyncMessage` from the network + sync_rx: mpsc::UnboundedReceiver>, + /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. + sync_manager: SyncManager, + /// To manipulate sync state and peer connection status + network_globals: Arc>, + /// Beacon chain harness + harness: BeaconChainHarness>, + /// `rng` for generating test blocks and blobs. 
+ rng: XorShiftRng, + fork_name: ForkName, + log: Logger, + spec: Arc, +} diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs new file mode 100644 index 0000000000..6faa8b7247 --- /dev/null +++ b/beacon_node/network/src/sync/tests/range.rs @@ -0,0 +1,273 @@ +use super::*; +use crate::status::ToStatusMessage; +use crate::sync::manager::SLOT_IMPORT_TOLERANCE; +use crate::sync::range_sync::RangeSyncType; +use crate::sync::SyncMessage; +use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; +use beacon_chain::EngineState; +use lighthouse_network::rpc::{RequestType, StatusMessage}; +use lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; +use lighthouse_network::{PeerId, SyncInfo}; +use std::time::Duration; +use types::{EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot}; + +const D: Duration = Duration::new(0, 0); + +impl TestRig { + /// Produce a head peer with an advanced head + fn add_head_peer(&mut self) -> PeerId { + self.add_head_peer_with_root(Hash256::random()) + } + + /// Produce a head peer with an advanced head + fn add_head_peer_with_root(&mut self, head_root: Hash256) -> PeerId { + let local_info = self.local_info(); + self.add_peer(SyncInfo { + head_root, + head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64), + ..local_info + }) + } + + // Produce a finalized peer with an advanced finalized epoch + fn add_finalized_peer(&mut self) -> PeerId { + self.add_finalized_peer_with_root(Hash256::random()) + } + + // Produce a finalized peer with an advanced finalized epoch + fn add_finalized_peer_with_root(&mut self, finalized_root: Hash256) -> PeerId { + let local_info = self.local_info(); + let finalized_epoch = local_info.finalized_epoch + 2; + self.add_peer(SyncInfo { + finalized_epoch, + finalized_root, + head_slot: finalized_epoch.start_slot(E::slots_per_epoch()), + head_root: Hash256::random(), + }) + } + + fn local_info(&self) -> 
SyncInfo { + let StatusMessage { + fork_digest: _, + finalized_root, + finalized_epoch, + head_root, + head_slot, + } = self.harness.chain.status_message(); + SyncInfo { + head_slot, + head_root, + finalized_epoch, + finalized_root, + } + } + + fn add_peer(&mut self, remote_info: SyncInfo) -> PeerId { + // Create valid peer known to network globals + let peer_id = self.new_connected_peer(); + // Send peer to sync + self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info.clone())); + peer_id + } + + fn assert_state(&self, state: RangeSyncType) { + assert_eq!( + self.sync_manager + .range_sync_state() + .expect("State is ok") + .expect("Range should be syncing") + .0, + state, + "not expected range sync state" + ); + } + + #[track_caller] + fn expect_chain_segment(&mut self) { + self.pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expect ChainSegment work event: {e:?}")); + } + + fn update_execution_engine_state(&mut self, state: EngineState) { + self.log(&format!("execution engine state updated: {state:?}")); + self.sync_manager.update_execution_engine_state(state); + } + + fn find_blocks_by_range_request(&mut self, target_peer_id: &PeerId) -> (Id, Option) { + let block_req_id = self + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlocksByRange(_), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + } if peer_id == target_peer_id => Some(*id), + _ => None, + }) + .expect("Should have a blocks by range request"); + + let blob_req_id = if self.after_deneb() { + Some( + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlobsByRange(_), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + } if peer_id == target_peer_id => Some(*id), + _ => None, + }) + .expect("Should have a blobs by 
range request"), + ) + } else { + None + }; + + (block_req_id, blob_req_id) + } + + fn find_and_complete_blocks_by_range_request(&mut self, target_peer_id: PeerId) { + let (blocks_req_id, blobs_req_id) = self.find_blocks_by_range_request(&target_peer_id); + + // Complete the request with a single stream termination + self.log(&format!( + "Completing BlocksByRange request {blocks_req_id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcBlock { + request_id: SyncRequestId::RangeBlockAndBlobs { id: blocks_req_id }, + peer_id: target_peer_id, + beacon_block: None, + seen_timestamp: D, + }); + + if let Some(blobs_req_id) = blobs_req_id { + // Complete the request with a single stream termination + self.log(&format!( + "Completing BlobsByRange request {blobs_req_id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcBlob { + request_id: SyncRequestId::RangeBlockAndBlobs { id: blobs_req_id }, + peer_id: target_peer_id, + blob_sidecar: None, + seen_timestamp: D, + }); + } + } + + async fn create_canonical_block(&mut self) -> SignedBeaconBlock { + self.harness.advance_slot(); + + let block_root = self + .harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness + .chain + .store + .get_full_block(&block_root) + .unwrap() + .unwrap() + } + + async fn remember_block(&mut self, block: SignedBeaconBlock) { + self.harness + .process_block(block.slot(), block.canonical_root(), (block.into(), None)) + .await + .unwrap(); + } +} + +#[test] +fn head_chain_removed_while_finalized_syncing() { + // NOTE: this is a regression test. + // Added in PR https://github.com/sigp/lighthouse/pull/2821 + let mut rig = TestRig::test_setup(); + + // Get a peer with an advanced head + let head_peer = rig.add_head_peer(); + rig.assert_state(RangeSyncType::Head); + + // Sync should have requested a batch, grab the request. 
+ let _ = rig.find_blocks_by_range_request(&head_peer); + + // Now get a peer with an advanced finalized epoch. + let finalized_peer = rig.add_finalized_peer(); + rig.assert_state(RangeSyncType::Finalized); + + // Sync should have requested a batch, grab the request + let _ = rig.find_blocks_by_range_request(&finalized_peer); + + // Fail the head chain by disconnecting the peer. + rig.peer_disconnected(head_peer); + rig.assert_state(RangeSyncType::Finalized); +} + +#[tokio::test] +async fn state_update_while_purging() { + // NOTE: this is a regression test. + // Added in PR https://github.com/sigp/lighthouse/pull/2827 + let mut rig = TestRig::test_setup(); + + // Create blocks on a separate harness + let mut rig_2 = TestRig::test_setup(); + // Need to create blocks that can be inserted into the fork-choice and fit the "known + // conditions" below. + let head_peer_block = rig_2.create_canonical_block().await; + let head_peer_root = head_peer_block.canonical_root(); + let finalized_peer_block = rig_2.create_canonical_block().await; + let finalized_peer_root = finalized_peer_block.canonical_root(); + + // Get a peer with an advanced head + let head_peer = rig.add_head_peer_with_root(head_peer_root); + rig.assert_state(RangeSyncType::Head); + + // Sync should have requested a batch, grab the request. + let _ = rig.find_blocks_by_range_request(&head_peer); + + // Now get a peer with an advanced finalized epoch. + let finalized_peer = rig.add_finalized_peer_with_root(finalized_peer_root); + rig.assert_state(RangeSyncType::Finalized); + + // Sync should have requested a batch, grab the request + let _ = rig.find_blocks_by_range_request(&finalized_peer); + + // Now the chain knows both chains target roots. 
+ rig.remember_block(head_peer_block).await; + rig.remember_block(finalized_peer_block).await; + + // Add an additional peer to the second chain to make range update it's status + rig.add_finalized_peer(); +} + +#[test] +fn pause_and_resume_on_ee_offline() { + let mut rig = TestRig::test_setup(); + + // add some peers + let peer1 = rig.add_head_peer(); + // make the ee offline + rig.update_execution_engine_state(EngineState::Offline); + // send the response to the request + rig.find_and_complete_blocks_by_range_request(peer1); + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + + // while the ee is offline, more peers might arrive. Add a new finalized peer. + let peer2 = rig.add_finalized_peer(); + + // send the response to the request + rig.find_and_complete_blocks_by_range_request(peer2); + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + // make the beacon processor available again. + // update_execution_engine_state implicitly calls resume + // now resume range, we should have two processing requests in the beacon processor. 
+ rig.update_execution_engine_state(EngineState::Online); + + rig.expect_chain_segment(); + rig.expect_chain_segment(); +} diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index cbf6284f2a..5b48e3f0d8 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -7,7 +7,7 @@ edition = { workspace = true } [dependencies] derivative = { workspace = true } itertools = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } types = { workspace = true } state_processing = { workspace = true } @@ -25,4 +25,4 @@ tokio = { workspace = true } maplit = { workspace = true } [features] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 4de9d351f3..083c1170f0 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -105,7 +105,7 @@ impl SplitAttestation { } } -impl<'a, E: EthSpec> CompactAttestationRef<'a, E> { +impl CompactAttestationRef<'_, E> { pub fn attestation_data(&self) -> AttestationData { AttestationData { slot: self.data.slot, diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 3a002bf870..d01c73118c 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -877,11 +877,11 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(1); // Only run this test on the phase0 hard-fork. 
- if spec.altair_fork_epoch != None { + if spec.altair_fork_epoch.is_some() { return; } - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) @@ -902,10 +902,10 @@ mod release_tests { ); for (atts, aggregate) in &attestations { - let att2 = aggregate.as_ref().unwrap().message().aggregate().clone(); + let att2 = aggregate.as_ref().unwrap().message().aggregate(); let att1 = atts - .into_iter() + .iter() .map(|(att, _)| att) .take(2) .fold::>, _>(None, |att, new_att| { @@ -946,7 +946,7 @@ mod release_tests { .unwrap(); assert_eq!( - committees.get(0).unwrap().committee.len() - 2, + committees.first().unwrap().committee.len() - 2, earliest_attestation_validators( &att2_split.as_ref(), &state, @@ -963,7 +963,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(1); let op_pool = OperationPool::::new(); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let slot = state.slot(); let committees = state @@ -1020,7 +1020,7 @@ mod release_tests { let agg_att = &block_attestations[0]; assert_eq!( agg_att.num_set_aggregation_bits(), - spec.target_committee_size as usize + spec.target_committee_size ); // Prune attestations shouldn't do anything at this point. 
@@ -1039,7 +1039,7 @@ mod release_tests { fn attestation_duplicate() { let (harness, ref spec) = attestation_test_state::(1); - let state = get_current_state_initialize_epoch_cache(&harness, &spec); + let state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1082,7 +1082,7 @@ mod release_tests { fn attestation_pairwise_overlapping() { let (harness, ref spec) = attestation_test_state::(1); - let state = get_current_state_initialize_epoch_cache(&harness, &spec); + let state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1113,19 +1113,17 @@ mod release_tests { let aggs1 = atts1 .chunks_exact(step_size * 2) .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { + let agg = chunk + .iter() + .map(|(att, _)| att) + .fold::>, _>(None, |att, new_att| { if let Some(mut a) = att { a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) } - }, - ); + }); agg.unwrap() }) .collect::>(); @@ -1136,19 +1134,17 @@ mod release_tests { .as_slice() .chunks_exact(step_size * 2) .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { + let agg = chunk + .iter() + .map(|(att, _)| att) + .fold::>, _>(None, |att, new_att| { if let Some(mut a) = att { a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) } - }, - ); + }); agg.unwrap() }) .collect::>(); @@ -1181,7 +1177,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(num_committees); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1194,7 +1190,7 @@ mod release_tests { .collect::>(); let max_attestations = ::MaxAttestations::to_usize(); - let target_committee_size = spec.target_committee_size as usize; + let target_committee_size 
= spec.target_committee_size; let num_validators = num_committees * MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size; @@ -1209,12 +1205,12 @@ mod release_tests { let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, step_size| { - let att_0 = attestations.get(0).unwrap().0.clone(); + let att_0 = attestations.first().unwrap().0.clone(); let aggs = attestations .chunks_exact(step_size) .map(|chunk| { chunk - .into_iter() + .iter() .map(|(att, _)| att) .fold::, _>( att_0.clone(), @@ -1296,7 +1292,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(num_committees); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); let slot = state.slot(); @@ -1308,7 +1304,7 @@ mod release_tests { .collect::>(); let max_attestations = ::MaxAttestations::to_usize(); - let target_committee_size = spec.target_committee_size as usize; + let target_committee_size = spec.target_committee_size; // Each validator will have a multiple of 1_000_000_000 wei. // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000). 
@@ -1329,12 +1325,12 @@ mod release_tests { let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, step_size| { - let att_0 = attestations.get(0).unwrap().0.clone(); + let att_0 = attestations.first().unwrap().0.clone(); let aggs = attestations .chunks_exact(step_size) .map(|chunk| { chunk - .into_iter() + .iter() .map(|(att, _)| att) .fold::, _>( att_0.clone(), @@ -1615,7 +1611,6 @@ mod release_tests { let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1674,7 +1669,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1711,7 +1705,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1791,7 +1784,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, diff --git a/beacon_node/operation_pool/src/metrics.rs b/beacon_node/operation_pool/src/metrics.rs index e2a8b43ed1..14088688e5 100644 --- a/beacon_node/operation_pool/src/metrics.rs +++ b/beacon_node/operation_pool/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static BUILD_REWARD_CACHE_TIME: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index dff030fb0f..cecfcee868 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -86,6 +86,24 @@ pub fn cli_app() -> Command { 
.hide(true) .display_order(0) ) + .arg( + Arg::new("blob-publication-batches") + .long("blob-publication-batches") + .action(ArgAction::Set) + .help_heading(FLAG_HEADER) + .help("Number of batches that the node splits blobs or data columns into during publication. This doesn't apply if the node is the block proposer. Used in PeerDAS only.") + .display_order(0) + .hide(true) + ) + .arg( + Arg::new("blob-publication-batch-interval") + .long("blob-publication-batch-interval") + .action(ArgAction::Set) + .help_heading(FLAG_HEADER) + .help("The delay in milliseconds applied by the node between sending each blob or data column batch. This doesn't apply if the node is the block proposer.") + .display_order(0) + .hide(true) + ) .arg( Arg::new("subscribe-all-subnets") .long("subscribe-all-subnets") @@ -675,8 +693,7 @@ pub fn cli_app() -> Command { Arg::new("staking") .long("staking") .help("Standard option for a staking beacon node. This will enable the HTTP server \ - on localhost:5052 and import deposit logs from the execution node. This is \ - equivalent to `--http` on merge-ready networks, or `--http --eth1` pre-merge") + on localhost:5052 and import deposit logs from the execution node.") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .display_order(0) @@ -688,21 +705,21 @@ pub fn cli_app() -> Command { .arg( Arg::new("eth1") .long("eth1") - .help("If present the node will connect to an eth1 node. 
This is required for \ - block production, you must use this flag if you wish to serve a validator.") + .help("DEPRECATED") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .display_order(0) + .hide(true) ) .arg( Arg::new("dummy-eth1") .long("dummy-eth1") + .help("DEPRECATED") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .conflicts_with("eth1") - .help("If present, uses an eth1 backend that generates static dummy data.\ - Identical to the method used at the 2019 Canada interop.") .display_order(0) + .hide(true) ) .arg( Arg::new("eth1-purge-cache") @@ -738,9 +755,23 @@ pub fn cli_app() -> Command { Arg::new("slots-per-restore-point") .long("slots-per-restore-point") .value_name("SLOT_COUNT") - .help("Specifies how often a freezer DB restore point should be stored. \ - Cannot be changed after initialization. \ - [default: 8192 (mainnet) or 64 (minimal)]") + .help("DEPRECATED. This flag has no effect.") + .action(ArgAction::Set) + .display_order(0) + ) + .arg( + Arg::new("hierarchy-exponents") + .long("hierarchy-exponents") + .value_name("EXPONENTS") + .help("Specifies the frequency for storing full state snapshots and hierarchical \ + diffs in the freezer DB. Accepts a comma-separated list of ascending \ + exponents. Each exponent defines an interval for storing diffs to the layer \ + above. The last exponent defines the interval for full snapshots. \ + For example, a config of '4,8,12' would store a full snapshot every \ + 4096 (2^12) slots, first-level diffs every 256 (2^8) slots, and second-level \ + diffs every 16 (2^4) slots. \ + Cannot be changed after initialization. 
\ + [default: 5,9,11,13,16,18,21]") .action(ArgAction::Set) .display_order(0) ) @@ -768,11 +799,24 @@ pub fn cli_app() -> Command { Arg::new("historic-state-cache-size") .long("historic-state-cache-size") .value_name("SIZE") - .help("Specifies how many states from the freezer database should cache in memory") + .help("Specifies how many states from the freezer database should be cached in \ + memory") .default_value("1") .action(ArgAction::Set) .display_order(0) ) + .arg( + Arg::new("hdiff-buffer-cache-size") + .long("hdiff-buffer-cache-size") + .value_name("SIZE") + .help("Number of hierarchical diff (hdiff) buffers to cache in memory. Each buffer \ + is around the size of a BeaconState so you should be cautious about setting \ + this value too high. This flag is irrelevant for most nodes, which run with \ + state pruning enabled.") + .default_value("16") + .action(ArgAction::Set) + .display_order(0) + ) .arg( Arg::new("state-cache-size") .long("state-cache-size") @@ -793,6 +837,7 @@ pub fn cli_app() -> Command { .help("Server endpoint for an execution layer JWT-authenticated HTTP \ JSON-RPC connection. Uses the same endpoint to populate the \ deposit cache.") + .required(true) .action(ArgAction::Set) .display_order(0) ) @@ -987,7 +1032,6 @@ pub fn cli_app() -> Command { .default_value("0") .display_order(0) ) - /* * Misc. 
*/ @@ -1444,6 +1488,7 @@ pub fn cli_app() -> Command { Useful if you intend to run a non-validating beacon node.") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) + .conflicts_with("staking") .display_order(0) ) .arg( diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 2d31815351..8d8a44a6fd 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -121,7 +121,6 @@ pub fn get_config( if cli_args.get_flag("staking") { client_config.http_api.enabled = true; - client_config.sync_eth1_chain = true; } /* @@ -192,6 +191,15 @@ pub fn get_config( client_config.chain.enable_sampling = true; } + if let Some(batches) = clap_utils::parse_optional(cli_args, "blob-publication-batches")? { + client_config.chain.blob_publication_batches = batches; + } + + if let Some(interval) = clap_utils::parse_optional(cli_args, "blob-publication-batch-interval")? + { + client_config.chain.blob_publication_batch_interval = Duration::from_millis(interval); + } + /* * Prometheus metrics HTTP server */ @@ -254,18 +262,12 @@ pub fn get_config( * Eth1 */ - // When present, use an eth1 backend that generates deterministic junk. - // - // Useful for running testnets without the overhead of a deposit contract. if cli_args.get_flag("dummy-eth1") { - client_config.dummy_eth1_backend = true; + warn!(log, "The --dummy-eth1 flag is deprecated"); } - // When present, attempt to sync to an eth1 node. - // - // Required for block production. if cli_args.get_flag("eth1") { - client_config.sync_eth1_chain = true; + warn!(log, "The --eth1 flag is deprecated"); } if let Some(val) = cli_args.get_one::("eth1-blocks-per-log-query") { @@ -284,93 +286,84 @@ pub fn get_config( client_config.eth1.cache_follow_distance = Some(follow_distance); } - if let Some(endpoints) = cli_args.get_one::("execution-endpoint") { - let mut el_config = execution_layer::Config::default(); + // `--execution-endpoint` is required now. 
+ let endpoints: String = clap_utils::parse_required(cli_args, "execution-endpoint")?; + let mut el_config = execution_layer::Config::default(); - // Always follow the deposit contract when there is an execution endpoint. - // - // This is wasteful for non-staking nodes as they have no need to process deposit contract - // logs and build an "eth1" cache. The alternative is to explicitly require the `--eth1` or - // `--staking` flags, however that poses a risk to stakers since they cannot produce blocks - // without "eth1". - // - // The waste for non-staking nodes is relatively small so we err on the side of safety for - // stakers. The merge is already complicated enough. - client_config.sync_eth1_chain = true; + // Parse a single execution endpoint, logging warnings if multiple endpoints are supplied. + let execution_endpoint = parse_only_one_value( + endpoints.as_str(), + SensitiveUrl::parse, + "--execution-endpoint", + log, + )?; - // Parse a single execution endpoint, logging warnings if multiple endpoints are supplied. - let execution_endpoint = - parse_only_one_value(endpoints, SensitiveUrl::parse, "--execution-endpoint", log)?; + // JWTs are required if `--execution-endpoint` is supplied. They can be either passed via + // file_path or directly as string. - // JWTs are required if `--execution-endpoint` is supplied. They can be either passed via - // file_path or directly as string. + let secret_file: PathBuf; + // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. + if let Some(secret_files) = cli_args.get_one::("execution-jwt") { + secret_file = + parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; - let secret_file: PathBuf; - // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. 
- if let Some(secret_files) = cli_args.get_one::("execution-jwt") { - secret_file = - parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; - - // Check if the JWT secret key is passed directly via cli flag and persist it to the default - // file location. - } else if let Some(jwt_secret_key) = cli_args.get_one::("execution-jwt-secret-key") - { - use std::fs::File; - use std::io::Write; - secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); - let mut jwt_secret_key_file = File::create(secret_file.clone()) - .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; - jwt_secret_key_file - .write_all(jwt_secret_key.as_bytes()) - .map_err(|e| { - format!( - "Error occurred while writing to jwt_secret_key file: {:?}", - e - ) - })?; - } else { - return Err("Error! Please set either --execution-jwt file_path or --execution-jwt-secret-key directly via cli when using --execution-endpoint".to_string()); - } - - // Parse and set the payload builder, if any. - if let Some(endpoint) = cli_args.get_one::("builder") { - let payload_builder = - parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; - el_config.builder_url = Some(payload_builder); - - el_config.builder_user_agent = - clap_utils::parse_optional(cli_args, "builder-user-agent")?; - - el_config.builder_header_timeout = - clap_utils::parse_optional(cli_args, "builder-header-timeout")? - .map(Duration::from_millis); - } - - // Set config values from parse values. 
- el_config.secret_file = Some(secret_file.clone()); - el_config.execution_endpoint = Some(execution_endpoint.clone()); - el_config.suggested_fee_recipient = - clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; - el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; - el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; - el_config - .default_datadir - .clone_from(client_config.data_dir()); - let execution_timeout_multiplier = - clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; - el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); - - client_config.eth1.endpoint = Eth1Endpoint::Auth { - endpoint: execution_endpoint, - jwt_path: secret_file, - jwt_id: el_config.jwt_id.clone(), - jwt_version: el_config.jwt_version.clone(), - }; - - // Store the EL config in the client config. - client_config.execution_layer = Some(el_config); + // Check if the JWT secret key is passed directly via cli flag and persist it to the default + // file location. + } else if let Some(jwt_secret_key) = cli_args.get_one::("execution-jwt-secret-key") { + use std::fs::File; + use std::io::Write; + secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); + let mut jwt_secret_key_file = File::create(secret_file.clone()) + .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?; + jwt_secret_key_file + .write_all(jwt_secret_key.as_bytes()) + .map_err(|e| { + format!( + "Error occurred while writing to jwt_secret_key file: {:?}", + e + ) + })?; + } else { + return Err("Error! Please set either --execution-jwt file_path or --execution-jwt-secret-key directly via cli when using --execution-endpoint".to_string()); } + // Parse and set the payload builder, if any. 
+ if let Some(endpoint) = cli_args.get_one::("builder") { + let payload_builder = + parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; + el_config.builder_url = Some(payload_builder); + + el_config.builder_user_agent = clap_utils::parse_optional(cli_args, "builder-user-agent")?; + + el_config.builder_header_timeout = + clap_utils::parse_optional(cli_args, "builder-header-timeout")? + .map(Duration::from_millis); + } + + // Set config values from parse values. + el_config.secret_file = Some(secret_file.clone()); + el_config.execution_endpoint = Some(execution_endpoint.clone()); + el_config.suggested_fee_recipient = + clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; + el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; + el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; + el_config + .default_datadir + .clone_from(client_config.data_dir()); + let execution_timeout_multiplier = + clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; + el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); + + client_config.eth1.endpoint = Eth1Endpoint::Auth { + endpoint: execution_endpoint, + jwt_path: secret_file, + jwt_id: el_config.jwt_id.clone(), + jwt_version: el_config.jwt_version.clone(), + }; + + // Store the EL config in the client config. 
+ client_config.execution_layer = Some(el_config); + // 4844 params if let Some(trusted_setup) = context .eth2_network_config @@ -400,13 +393,6 @@ pub fn get_config( client_config.blobs_db_path = Some(PathBuf::from(blobs_db_dir)); } - let (sprp, sprp_explicit) = get_slots_per_restore_point::(clap_utils::parse_optional( - cli_args, - "slots-per-restore-point", - )?)?; - client_config.store.slots_per_restore_point = sprp; - client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; - if let Some(block_cache_size) = cli_args.get_one::("block-cache-size") { client_config.store.block_cache_size = block_cache_size .parse() @@ -419,11 +405,16 @@ pub fn get_config( .map_err(|_| "state-cache-size is not a valid integer".to_string())?; } - if let Some(historic_state_cache_size) = cli_args.get_one::("historic-state-cache-size") + if let Some(historic_state_cache_size) = + clap_utils::parse_optional(cli_args, "historic-state-cache-size")? { - client_config.store.historic_state_cache_size = historic_state_cache_size - .parse() - .map_err(|_| "historic-state-cache-size is not a valid integer".to_string())?; + client_config.store.historic_state_cache_size = historic_state_cache_size; + } + + if let Some(hdiff_buffer_cache_size) = + clap_utils::parse_optional(cli_args, "hdiff-buffer-cache-size")? + { + client_config.store.hdiff_buffer_cache_size = hdiff_buffer_cache_size; } client_config.store.compact_on_init = cli_args.get_flag("compact-db"); @@ -437,6 +428,14 @@ pub fn get_config( client_config.store.prune_payloads = prune_payloads; } + if clap_utils::parse_optional::(cli_args, "slots-per-restore-point")?.is_some() { + warn!(log, "The slots-per-restore-point flag is deprecated"); + } + + if let Some(hierarchy_config) = clap_utils::parse_optional(cli_args, "hierarchy-exponents")? { + client_config.store.hierarchy_config = hierarchy_config; + } + if let Some(epochs_per_migration) = clap_utils::parse_optional(cli_args, "epochs-per-migration")? 
{ @@ -1484,23 +1483,6 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { .unwrap_or_else(|| PathBuf::from(".")) } -/// Get the `slots_per_restore_point` value to use for the database. -/// -/// Return `(sprp, set_explicitly)` where `set_explicitly` is `true` if the user provided the value. -pub fn get_slots_per_restore_point( - slots_per_restore_point: Option, -) -> Result<(u64, bool), String> { - if let Some(slots_per_restore_point) = slots_per_restore_point { - Ok((slots_per_restore_point, true)) - } else { - let default = std::cmp::min( - E::slots_per_historical_root() as u64, - store::config::DEFAULT_SLOTS_PER_RESTORE_POINT, - ); - Ok((default, false)) - } -} - /// Parses the `cli_value` as a comma-separated string of values to be parsed with `parser`. /// /// If there is more than one value, log a warning. If there are no values, return an error. diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 5bc0f9dc6a..cca617d8c6 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -9,7 +9,7 @@ use beacon_chain::{ use clap::ArgMatches; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; -pub use config::{get_config, get_data_dir, get_slots_per_restore_point, set_network_config}; +pub use config::{get_config, get_data_dir, set_network_config}; use environment::RuntimeContext; pub use eth2_config::Eth2Config; use slasher::{DatabaseBackendOverride, Slasher}; @@ -140,7 +140,7 @@ impl ProductionBeaconNode { let builder = builder .beacon_chain_builder(client_genesis, client_config.clone()) .await?; - let builder = if client_config.sync_eth1_chain && !client_config.dummy_eth1_backend { + let builder = if client_config.sync_eth1_chain { info!( log, "Block production enabled"; @@ -150,13 +150,6 @@ impl ProductionBeaconNode { builder .caching_eth1_backend(client_config.eth1.clone()) .await? 
- } else if client_config.dummy_eth1_backend { - warn!( - log, - "Block production impaired"; - "reason" => "dummy eth1 backend is enabled" - ); - builder.dummy_eth1_backend()? } else { info!( log, diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index cdb18b3b9c..21d0cf8dec 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -5,23 +5,35 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } beacon_chain = { workspace = true } +criterion = { workspace = true } +rand = { workspace = true, features = ["small_rng"] } +tempfile = { workspace = true } [dependencies] +bls = { workspace = true } db-key = "0.0.5" -leveldb = { version = "0.8" } -parking_lot = { workspace = true } -itertools = { workspace = true } +directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -types = { workspace = true } -safe_arith = { workspace = true } -state_processing = { workspace = true } -slog = { workspace = true } -serde = { workspace = true } -lighthouse_metrics = { workspace = true } +itertools = { workspace = true } +leveldb = { version = "0.8" } +logging = { workspace = true } lru = { workspace = true } +metrics = { workspace = true } +parking_lot = { workspace = true } +safe_arith = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } sloggers = { workspace = true } -directory = { workspace = true } +smallvec = { workspace = true } +state_processing = { workspace = true } strum = { workspace = true } +superstruct = { workspace = true } +types = { workspace = true } +xdelta3 = { workspace = true } +zstd = { workspace = true } + +[[bench]] +name = "hdiff" +harness = false diff --git a/beacon_node/store/benches/hdiff.rs b/beacon_node/store/benches/hdiff.rs new file mode 100644 index 0000000000..2577f03f66 --- /dev/null +++ b/beacon_node/store/benches/hdiff.rs @@ -0,0 +1,116 @@ +use 
bls::PublicKeyBytes; +use criterion::{criterion_group, criterion_main, Criterion}; +use rand::Rng; +use ssz::Decode; +use store::{ + hdiff::{HDiff, HDiffBuffer}, + StoreConfig, +}; +use types::{BeaconState, Epoch, Eth1Data, EthSpec, MainnetEthSpec as E, Validator}; + +pub fn all_benches(c: &mut Criterion) { + let spec = E::default_spec(); + let genesis_time = 0; + let eth1_data = Eth1Data::default(); + let mut rng = rand::thread_rng(); + let validator_mutations = 1000; + let validator_additions = 100; + + for n in [1_000_000, 1_500_000, 2_000_000] { + let mut source_state = BeaconState::::new(genesis_time, eth1_data.clone(), &spec); + + for _ in 0..n { + append_validator(&mut source_state, &mut rng); + } + + let mut target_state = source_state.clone(); + // Change all balances + for i in 0..n { + let balance = target_state.balances_mut().get_mut(i).unwrap(); + *balance += rng.gen_range(1..=1_000_000); + } + // And some validator records + for _ in 0..validator_mutations { + let index = rng.gen_range(1..n); + // TODO: Only change a few things, and not the pubkey + *target_state.validators_mut().get_mut(index).unwrap() = rand_validator(&mut rng); + } + for _ in 0..validator_additions { + append_validator(&mut target_state, &mut rng); + } + + bench_against_states( + c, + source_state, + target_state, + &format!("n={n} v_mut={validator_mutations} v_add={validator_additions}"), + ); + } +} + +fn bench_against_states( + c: &mut Criterion, + source_state: BeaconState, + target_state: BeaconState, + id: &str, +) { + let slot_diff = target_state.slot() - source_state.slot(); + let config = StoreConfig::default(); + let source = HDiffBuffer::from_state(source_state); + let target = HDiffBuffer::from_state(target_state); + let diff = HDiff::compute(&source, &target, &config).unwrap(); + println!( + "state slot diff {slot_diff} - diff size {id} {}", + diff.size() + ); + + c.bench_function(&format!("compute hdiff {id}"), |b| { + b.iter(|| { + HDiff::compute(&source, &target, 
&config).unwrap(); + }) + }); + c.bench_function(&format!("apply hdiff {id}"), |b| { + b.iter(|| { + let mut source = source.clone(); + diff.apply(&mut source, &config).unwrap(); + }) + }); +} + +fn rand_validator(mut rng: impl Rng) -> Validator { + let mut pubkey = [0u8; 48]; + rng.fill_bytes(&mut pubkey); + let withdrawal_credentials: [u8; 32] = rng.gen(); + + Validator { + pubkey: PublicKeyBytes::from_ssz_bytes(&pubkey).unwrap(), + withdrawal_credentials: withdrawal_credentials.into(), + slashed: false, + effective_balance: 32_000_000_000, + activation_eligibility_epoch: Epoch::max_value(), + activation_epoch: Epoch::max_value(), + exit_epoch: Epoch::max_value(), + withdrawable_epoch: Epoch::max_value(), + } +} + +fn append_validator(state: &mut BeaconState, mut rng: impl Rng) { + state + .balances_mut() + .push(32_000_000_000 + rng.gen_range(1..=1_000_000_000)) + .unwrap(); + if let Ok(inactivity_scores) = state.inactivity_scores_mut() { + inactivity_scores.push(0).unwrap(); + } + state + .validators_mut() + .push(rand_validator(&mut rng)) + .unwrap(); +} + +criterion_group! { + name = benches; + config = Criterion::default().sample_size(10); + targets = all_benches +} +criterion_main!(benches); diff --git a/beacon_node/store/src/chunk_writer.rs b/beacon_node/store/src/chunk_writer.rs deleted file mode 100644 index 059b812e74..0000000000 --- a/beacon_node/store/src/chunk_writer.rs +++ /dev/null @@ -1,75 +0,0 @@ -use crate::chunked_vector::{chunk_key, Chunk, ChunkError, Field}; -use crate::{Error, KeyValueStore, KeyValueStoreOp}; -use types::EthSpec; - -/// Buffered writer for chunked vectors (block roots mainly). -pub struct ChunkWriter<'a, F, E, S> -where - F: Field, - E: EthSpec, - S: KeyValueStore, -{ - /// Buffered chunk awaiting writing to disk (always dirty). - chunk: Chunk, - /// Chunk index of `chunk`. 
- index: usize, - store: &'a S, -} - -impl<'a, F, E, S> ChunkWriter<'a, F, E, S> -where - F: Field, - E: EthSpec, - S: KeyValueStore, -{ - pub fn new(store: &'a S, vindex: usize) -> Result { - let chunk_index = F::chunk_index(vindex); - let chunk = Chunk::load(store, F::column(), &chunk_key(chunk_index))? - .unwrap_or_else(|| Chunk::new(vec![F::Value::default(); F::chunk_size()])); - - Ok(Self { - chunk, - index: chunk_index, - store, - }) - } - - /// Set the value at a given vector index, writing the current chunk and moving on if necessary. - pub fn set( - &mut self, - vindex: usize, - value: F::Value, - batch: &mut Vec, - ) -> Result<(), Error> { - let chunk_index = F::chunk_index(vindex); - - // Advance to the next chunk. - if chunk_index != self.index { - self.write(batch)?; - *self = Self::new(self.store, vindex)?; - } - - let i = vindex % F::chunk_size(); - let existing_value = &self.chunk.values[i]; - - if existing_value == &value || existing_value == &F::Value::default() { - self.chunk.values[i] = value; - Ok(()) - } else { - Err(ChunkError::Inconsistent { - field: F::column(), - chunk_index, - existing_value: format!("{:?}", existing_value), - new_value: format!("{:?}", value), - } - .into()) - } - } - - /// Write the current chunk to disk. - /// - /// Should be called before the writer is dropped, in order to write the final chunk to disk. 
- pub fn write(&self, batch: &mut Vec) -> Result<(), Error> { - self.chunk.store(F::column(), &chunk_key(self.index), batch) - } -} diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs index b3322b5225..8f6682e758 100644 --- a/beacon_node/store/src/chunked_iter.rs +++ b/beacon_node/store/src/chunked_iter.rs @@ -56,7 +56,7 @@ where } } -impl<'a, F, E, Hot, Cold> Iterator for ChunkedVectorIter<'a, F, E, Hot, Cold> +impl Iterator for ChunkedVectorIter<'_, F, E, Hot, Cold> where F: Field, E: EthSpec, diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 4450989d59..83b8da2a18 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -322,11 +322,11 @@ macro_rules! field { } field!( - BlockRoots, + BlockRootsChunked, FixedLengthField, Hash256, E::SlotsPerHistoricalRoot, - DBColumn::BeaconBlockRoots, + DBColumn::BeaconBlockRootsChunked, |_| OncePerNSlots { n: 1, activation_slot: Some(Slot::new(0)), @@ -336,11 +336,11 @@ field!( ); field!( - StateRoots, + StateRootsChunked, FixedLengthField, Hash256, E::SlotsPerHistoricalRoot, - DBColumn::BeaconStateRoots, + DBColumn::BeaconStateRootsChunked, |_| OncePerNSlots { n: 1, activation_slot: Some(Slot::new(0)), @@ -859,8 +859,8 @@ mod test { fn test_fixed_length>(_: F, expected: bool) { assert_eq!(F::is_fixed_length(), expected); } - test_fixed_length(BlockRoots, true); - test_fixed_length(StateRoots, true); + test_fixed_length(BlockRootsChunked, true); + test_fixed_length(StateRootsChunked, true); test_fixed_length(HistoricalRoots, false); test_fixed_length(RandaoMixes, true); } @@ -880,12 +880,12 @@ mod test { #[test] fn needs_genesis_value_block_roots() { - needs_genesis_value_once_per_slot(BlockRoots); + needs_genesis_value_once_per_slot(BlockRootsChunked); } #[test] fn needs_genesis_value_state_roots() { - needs_genesis_value_once_per_slot(StateRoots); + 
needs_genesis_value_once_per_slot(StateRootsChunked); } #[test] diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index d43999d822..4f67530570 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,38 +1,47 @@ -use crate::{DBColumn, Error, StoreItem}; +use crate::hdiff::HierarchyConfig; +use crate::{AnchorInfo, DBColumn, Error, Split, StoreItem}; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use std::io::Write; use std::num::NonZeroUsize; +use superstruct::superstruct; use types::non_zero_usize::new_non_zero_usize; -use types::{EthSpec, MinimalEthSpec}; +use types::EthSpec; +use zstd::Encoder; -pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; -pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; -pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(5); +// Only used in tests. Mainnet sets a higher default on the CLI. +pub const DEFAULT_EPOCHS_PER_STATE_DIFF: u64 = 8; +pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(64); pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); +pub const DEFAULT_COMPRESSION_LEVEL: i32 = 1; pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); +pub const DEFAULT_HDIFF_BUFFER_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); +const EST_COMPRESSION_FACTOR: usize = 2; pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1; pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; /// Database configuration parameters. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct StoreConfig { - /// Number of slots to wait between storing restore points in the freezer database. - pub slots_per_restore_point: u64, - /// Flag indicating whether the `slots_per_restore_point` was set explicitly by the user. - pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. 
pub block_cache_size: NonZeroUsize, /// Maximum number of states to store in the in-memory state cache. pub state_cache_size: NonZeroUsize, - /// Maximum number of states from freezer database to store in the in-memory state cache. + /// Compression level for blocks, state diffs and other compressed values. + pub compression_level: i32, + /// Maximum number of historic states to store in the in-memory historic state cache. pub historic_state_cache_size: NonZeroUsize, + /// Maximum number of `HDiffBuffer`s to store in memory. + pub hdiff_buffer_cache_size: NonZeroUsize, /// Whether to compact the database on initialization. pub compact_on_init: bool, /// Whether to compact the database during database pruning. pub compact_on_prune: bool, /// Whether to prune payloads on initialization and finalization. pub prune_payloads: bool, + /// State diff hierarchy. + pub hierarchy_config: HierarchyConfig, /// Whether to prune blobs older than the blob data availability boundary. pub prune_blobs: bool, /// Frequency of blob pruning in epochs. Default: 1 (every epoch). @@ -43,28 +52,59 @@ pub struct StoreConfig { } /// Variant of `StoreConfig` that gets written to disk. Contains immutable configuration params. 
-#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] +#[superstruct( + variants(V1, V22), + variant_attributes(derive(Debug, Clone, PartialEq, Eq, Encode, Decode)) +)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct OnDiskStoreConfig { + #[superstruct(only(V1))] pub slots_per_restore_point: u64, + /// Prefix byte to future-proof versions of the `OnDiskStoreConfig` post V1 + #[superstruct(only(V22))] + version_byte: u8, + #[superstruct(only(V22))] + pub hierarchy_config: HierarchyConfig, +} + +impl OnDiskStoreConfigV22 { + fn new(hierarchy_config: HierarchyConfig) -> Self { + Self { + version_byte: 22, + hierarchy_config, + } + } } #[derive(Debug, Clone)] pub enum StoreConfigError { - MismatchedSlotsPerRestorePoint { config: u64, on_disk: u64 }, + MismatchedSlotsPerRestorePoint { + config: u64, + on_disk: u64, + }, + InvalidCompressionLevel { + level: i32, + }, + IncompatibleStoreConfig { + config: OnDiskStoreConfig, + on_disk: OnDiskStoreConfig, + }, + ZeroEpochsPerBlobPrune, + InvalidVersionByte(Option), } impl Default for StoreConfig { fn default() -> Self { Self { - // Safe default for tests, shouldn't ever be read by a CLI node. 
- slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, - slots_per_restore_point_set_explicitly: false, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, state_cache_size: DEFAULT_STATE_CACHE_SIZE, historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, + hdiff_buffer_cache_size: DEFAULT_HDIFF_BUFFER_CACHE_SIZE, + compression_level: DEFAULT_COMPRESSION_LEVEL, compact_on_init: false, compact_on_prune: true, prune_payloads: true, + hierarchy_config: HierarchyConfig::default(), prune_blobs: true, epochs_per_blob_prune: DEFAULT_EPOCHS_PER_BLOB_PRUNE, blob_prune_margin_epochs: DEFAULT_BLOB_PUNE_MARGIN_EPOCHS, @@ -74,22 +114,90 @@ impl Default for StoreConfig { impl StoreConfig { pub fn as_disk_config(&self) -> OnDiskStoreConfig { - OnDiskStoreConfig { - slots_per_restore_point: self.slots_per_restore_point, - } + OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(self.hierarchy_config.clone())) } pub fn check_compatibility( &self, on_disk_config: &OnDiskStoreConfig, + split: &Split, + anchor: &AnchorInfo, ) -> Result<(), StoreConfigError> { - if self.slots_per_restore_point != on_disk_config.slots_per_restore_point { - return Err(StoreConfigError::MismatchedSlotsPerRestorePoint { - config: self.slots_per_restore_point, - on_disk: on_disk_config.slots_per_restore_point, - }); + // Allow changing the hierarchy exponents if no historic states are stored. + let no_historic_states_stored = anchor.no_historic_states_stored(split.slot); + let hierarchy_config_changed = + if let Ok(on_disk_hierarchy_config) = on_disk_config.hierarchy_config() { + *on_disk_hierarchy_config != self.hierarchy_config + } else { + false + }; + + if hierarchy_config_changed && !no_historic_states_stored { + Err(StoreConfigError::IncompatibleStoreConfig { + config: self.as_disk_config(), + on_disk: on_disk_config.clone(), + }) + } else { + Ok(()) } - Ok(()) + } + + /// Check that the configuration is valid. 
+ pub fn verify(&self) -> Result<(), StoreConfigError> { + self.verify_compression_level()?; + self.verify_epochs_per_blob_prune() + } + + /// Check that the compression level is valid. + fn verify_compression_level(&self) -> Result<(), StoreConfigError> { + if zstd::compression_level_range().contains(&self.compression_level) { + Ok(()) + } else { + Err(StoreConfigError::InvalidCompressionLevel { + level: self.compression_level, + }) + } + } + + /// Check that epochs_per_blob_prune is at least 1 epoch to avoid attempting to prune the same + /// epochs over and over again. + fn verify_epochs_per_blob_prune(&self) -> Result<(), StoreConfigError> { + if self.epochs_per_blob_prune > 0 { + Ok(()) + } else { + Err(StoreConfigError::ZeroEpochsPerBlobPrune) + } + } + + /// Estimate the size of `len` bytes after compression at the current compression level. + pub fn estimate_compressed_size(&self, len: usize) -> usize { + // This is a rough estimate, but for our data it seems that all non-zero compression levels + // provide a similar compression ratio. + if self.compression_level == 0 { + len + } else { + len / EST_COMPRESSION_FACTOR + } + } + + /// Estimate the size of `len` compressed bytes after decompression at the current compression + /// level. 
+ pub fn estimate_decompressed_size(&self, len: usize) -> usize { + if self.compression_level == 0 { + len + } else { + len * EST_COMPRESSION_FACTOR + } + } + + pub fn compress_bytes(&self, ssz_bytes: &[u8]) -> Result, Error> { + let mut compressed_value = + Vec::with_capacity(self.estimate_compressed_size(ssz_bytes.len())); + let mut encoder = Encoder::new(&mut compressed_value, self.compression_level) + .map_err(Error::Compression)?; + encoder.write_all(ssz_bytes).map_err(Error::Compression)?; + encoder.finish().map_err(Error::Compression)?; + Ok(compressed_value) } } @@ -99,10 +207,136 @@ impl StoreItem for OnDiskStoreConfig { } fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + match self { + OnDiskStoreConfig::V1(value) => value.as_ssz_bytes(), + OnDiskStoreConfig::V22(value) => value.as_ssz_bytes(), + } } fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) + // NOTE: V22 config can never be deserialized as a V1 because the minimum length of its + // serialization is: 1 prefix byte + 1 offset (OnDiskStoreConfigV1 container) + + // 1 offset (HierarchyConfig container) = 9. 
+ if let Ok(value) = OnDiskStoreConfigV1::from_ssz_bytes(bytes) { + return Ok(Self::V1(value)); + } + + Ok(Self::V22(OnDiskStoreConfigV22::from_ssz_bytes(bytes)?)) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + metadata::{ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_UNINITIALIZED, STATE_UPPER_LIMIT_NO_RETAIN}, + AnchorInfo, Split, + }; + use ssz::DecodeError; + use types::{Hash256, Slot}; + + #[test] + fn check_compatibility_ok() { + let store_config = StoreConfig { + ..Default::default() + }; + let on_disk_config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new( + store_config.hierarchy_config.clone(), + )); + let split = Split::default(); + assert!(store_config + .check_compatibility(&on_disk_config, &split, &ANCHOR_UNINITIALIZED) + .is_ok()); + } + + #[test] + fn check_compatibility_after_migration() { + let store_config = StoreConfig { + ..Default::default() + }; + let on_disk_config = OnDiskStoreConfig::V1(OnDiskStoreConfigV1 { + slots_per_restore_point: 8192, + }); + let split = Split::default(); + assert!(store_config + .check_compatibility(&on_disk_config, &split, &ANCHOR_UNINITIALIZED) + .is_ok()); + } + + #[test] + fn check_compatibility_hierarchy_config_incompatible() { + let store_config = StoreConfig::default(); + let on_disk_config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(HierarchyConfig { + exponents: vec![5, 8, 11, 13, 16, 18, 21], + })); + let split = Split { + slot: Slot::new(32), + ..Default::default() + }; + assert!(store_config + .check_compatibility(&on_disk_config, &split, &ANCHOR_FOR_ARCHIVE_NODE) + .is_err()); + } + + #[test] + fn check_compatibility_hierarchy_config_update() { + let store_config = StoreConfig { + ..Default::default() + }; + let on_disk_config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(HierarchyConfig { + exponents: vec![5, 8, 11, 13, 16, 18, 21], + })); + let split = Split::default(); + let anchor = AnchorInfo { + anchor_slot: Slot::new(0), + oldest_block_slot: Slot::new(0), + 
oldest_block_parent: Hash256::ZERO, + state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, + state_lower_limit: Slot::new(0), + }; + assert!(store_config + .check_compatibility(&on_disk_config, &split, &anchor) + .is_ok()); + } + + #[test] + fn serde_on_disk_config_v0_from_v1_default() { + let config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(<_>::default())); + let config_bytes = config.as_store_bytes(); + // On a downgrade, the previous version of lighthouse will attempt to deserialize the + // prefixed V22 as just the V1 version. + assert_eq!( + OnDiskStoreConfigV1::from_ssz_bytes(&config_bytes).unwrap_err(), + DecodeError::InvalidByteLength { + len: 16, + expected: 8 + }, + ); + } + + #[test] + fn serde_on_disk_config_v0_from_v1_empty() { + let config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(HierarchyConfig { + exponents: vec![], + })); + let config_bytes = config.as_store_bytes(); + // On a downgrade, the previous version of lighthouse will attempt to deserialize the + // prefixed V22 as just the V1 version. 
+ assert_eq!( + OnDiskStoreConfigV1::from_ssz_bytes(&config_bytes).unwrap_err(), + DecodeError::InvalidByteLength { + len: 9, + expected: 8 + }, + ); + } + + #[test] + fn serde_on_disk_config_v1_roundtrip() { + let config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(<_>::default())); + let bytes = config.as_store_bytes(); + assert_eq!(bytes[0], 22); + let config_out = OnDiskStoreConfig::from_store_bytes(&bytes).unwrap(); + assert_eq!(config_out, config); } } diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index c543a9c4e4..6bb4edee6b 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -1,9 +1,10 @@ use crate::chunked_vector::ChunkError; use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; +use crate::{hdiff, DBColumn}; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; +use types::{milhouse, BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; pub type Result = std::result::Result; @@ -38,27 +39,35 @@ pub enum Error { /// State reconstruction failed because it didn't reach the upper limit slot. /// /// This should never happen (it's a logic error). 
- StateReconstructionDidNotComplete, + StateReconstructionLogicError, StateReconstructionRootMismatch { slot: Slot, expected: Hash256, computed: Hash256, }, + MissingGenesisState, + MissingSnapshot(Slot), BlockReplayError(BlockReplayError), - AddPayloadLogicError, - SlotClockUnavailableForMigration, - InvalidKey, - InvalidBytes, - UnableToDowngrade, - InconsistentFork(InconsistentFork), - CacheBuildError(EpochCacheError), - RandaoMixOutOfBounds, + MilhouseError(milhouse::Error), + Compression(std::io::Error), FinalizedStateDecreasingSlot, FinalizedStateUnaligned, StateForCacheHasPendingUpdates { state_root: Hash256, slot: Slot, }, + AddPayloadLogicError, + InvalidKey, + InvalidBytes, + InconsistentFork(InconsistentFork), + Hdiff(hdiff::Error), + CacheBuildError(EpochCacheError), + ForwardsIterInvalidColumn(DBColumn), + ForwardsIterGap(DBColumn, Slot, Slot), + StateShouldNotBeRequired(Slot), + MissingBlock(Hash256), + RandaoMixOutOfBounds, + GenesisStateUnknown, ArithError(safe_arith::ArithError), } @@ -112,6 +121,18 @@ impl From for Error { } } +impl From for Error { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } +} + +impl From for Error { + fn from(e: hdiff::Error) -> Self { + Self::Hdiff(e) + } +} + impl From for Error { fn from(e: BlockReplayError) -> Error { Error::BlockReplayError(e) diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 1ccf1da1b7..27769a310a 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -1,37 +1,34 @@ -use crate::chunked_iter::ChunkedVectorIter; -use crate::chunked_vector::{BlockRoots, Field, StateRoots}; use crate::errors::{Error, Result}; use crate::iter::{BlockRootsIterator, StateRootsIterator}; -use crate::{HotColdDB, ItemStore}; +use crate::{ColumnIter, DBColumn, HotColdDB, ItemStore}; use itertools::process_results; -use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot}; +use std::marker::PhantomData; +use 
types::{BeaconState, EthSpec, Hash256, Slot}; pub type HybridForwardsBlockRootsIterator<'a, E, Hot, Cold> = - HybridForwardsIterator<'a, E, BlockRoots, Hot, Cold>; + HybridForwardsIterator<'a, E, Hot, Cold>; pub type HybridForwardsStateRootsIterator<'a, E, Hot, Cold> = - HybridForwardsIterator<'a, E, StateRoots, Hot, Cold>; + HybridForwardsIterator<'a, E, Hot, Cold>; -/// Trait unifying `BlockRoots` and `StateRoots` for forward iteration. -pub trait Root: Field { - fn simple_forwards_iterator, Cold: ItemStore>( - store: &HotColdDB, +impl, Cold: ItemStore> HotColdDB { + fn simple_forwards_iterator( + &self, + column: DBColumn, start_slot: Slot, end_state: BeaconState, end_root: Hash256, - ) -> Result; + ) -> Result { + if column == DBColumn::BeaconBlockRoots { + self.forwards_iter_block_roots_using_state(start_slot, end_state, end_root) + } else if column == DBColumn::BeaconStateRoots { + self.forwards_iter_state_roots_using_state(start_slot, end_state, end_root) + } else { + Err(Error::ForwardsIterInvalidColumn(column)) + } + } - /// The first slot for which this field is *no longer* stored in the freezer database. - /// - /// If `None`, then this field is not stored in the freezer database at all due to pruning - /// configuration. - fn freezer_upper_limit, Cold: ItemStore>( - store: &HotColdDB, - ) -> Option; -} - -impl Root for BlockRoots { - fn simple_forwards_iterator, Cold: ItemStore>( - store: &HotColdDB, + fn forwards_iter_block_roots_using_state( + &self, start_slot: Slot, end_state: BeaconState, end_block_root: Hash256, @@ -39,7 +36,7 @@ impl Root for BlockRoots { // Iterate backwards from the end state, stopping at the start slot. 
let values = process_results( std::iter::once(Ok((end_block_root, end_state.slot()))) - .chain(BlockRootsIterator::owned(store, end_state)), + .chain(BlockRootsIterator::owned(self, end_state)), |iter| { iter.take_while(|(_, slot)| *slot >= start_slot) .collect::>() @@ -48,17 +45,8 @@ impl Root for BlockRoots { Ok(SimpleForwardsIterator { values }) } - fn freezer_upper_limit, Cold: ItemStore>( - store: &HotColdDB, - ) -> Option { - // Block roots are stored for all slots up to the split slot (exclusive). - Some(store.get_split_slot()) - } -} - -impl Root for StateRoots { - fn simple_forwards_iterator, Cold: ItemStore>( - store: &HotColdDB, + fn forwards_iter_state_roots_using_state( + &self, start_slot: Slot, end_state: BeaconState, end_state_root: Hash256, @@ -66,7 +54,7 @@ impl Root for StateRoots { // Iterate backwards from the end state, stopping at the start slot. let values = process_results( std::iter::once(Ok((end_state_root, end_state.slot()))) - .chain(StateRootsIterator::owned(store, end_state)), + .chain(StateRootsIterator::owned(self, end_state)), |iter| { iter.take_while(|(_, slot)| *slot >= start_slot) .collect::>() @@ -75,51 +63,123 @@ impl Root for StateRoots { Ok(SimpleForwardsIterator { values }) } - fn freezer_upper_limit, Cold: ItemStore>( - store: &HotColdDB, - ) -> Option { - // State roots are stored for all slots up to the latest restore point (exclusive). - // There may not be a latest restore point if state pruning is enabled, in which - // case this function will return `None`. - store.get_latest_restore_point_slot() - } -} - -/// Forwards root iterator that makes use of a flat field table in the freezer DB. 
-pub struct FrozenForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> -{ - inner: ChunkedVectorIter<'a, F, E, Hot, Cold>, -} - -impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> - FrozenForwardsIterator<'a, E, F, Hot, Cold> -{ - pub fn new( - store: &'a HotColdDB, + /// Values in `column` are available in the range `start_slot..upper_bound`. + /// + /// If `None` is returned then no values are available from `start_slot` due to pruning or + /// incomplete backfill. + pub fn freezer_upper_bound_for_column( + &self, + column: DBColumn, start_slot: Slot, - last_restore_point_slot: Slot, - spec: &ChainSpec, - ) -> Self { - Self { - inner: ChunkedVectorIter::new( - store, - start_slot.as_usize(), - last_restore_point_slot, - spec, - ), + ) -> Result> { + if column == DBColumn::BeaconBlockRoots { + Ok(self.freezer_upper_bound_for_block_roots(start_slot)) + } else if column == DBColumn::BeaconStateRoots { + Ok(self.freezer_upper_bound_for_state_roots(start_slot)) + } else { + Err(Error::ForwardsIterInvalidColumn(column)) + } + } + + fn freezer_upper_bound_for_block_roots(&self, start_slot: Slot) -> Option { + let oldest_block_slot = self.get_oldest_block_slot(); + if start_slot < oldest_block_slot { + if start_slot == 0 { + // Slot 0 block root is always available. + Some(Slot::new(1)) + // Non-zero block roots are not available prior to the `oldest_block_slot`. + } else { + None + } + } else { + // Block roots are stored for all slots up to the split slot (exclusive). + Some(self.get_split_slot()) + } + } + + fn freezer_upper_bound_for_state_roots(&self, start_slot: Slot) -> Option { + let split_slot = self.get_split_slot(); + let anchor = self.get_anchor_info(); + + if start_slot >= anchor.state_upper_limit { + // Starting slot is after the upper limit, so the split is the upper limit. + // The split state's root is not available in the freezer so this is exclusive. 
+ Some(split_slot) + } else if start_slot <= anchor.state_lower_limit { + // Starting slot is prior to lower limit, so that's the upper limit. We can't + // iterate past the lower limit into the gap. The +1 accounts for exclusivity. + Some(anchor.state_lower_limit + 1) + } else { + // In the gap, nothing is available. + None } } } -impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> Iterator - for FrozenForwardsIterator<'a, E, F, Hot, Cold> +/// Forwards root iterator that makes use of a slot -> root mapping in the freezer DB. +pub struct FrozenForwardsIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> { + inner: ColumnIter<'a, Vec>, + column: DBColumn, + next_slot: Slot, + end_slot: Slot, + _phantom: PhantomData<(E, Hot, Cold)>, +} + +impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> + FrozenForwardsIterator<'a, E, Hot, Cold> { - type Item = (Hash256, Slot); + /// `end_slot` is EXCLUSIVE here. + pub fn new( + store: &'a HotColdDB, + column: DBColumn, + start_slot: Slot, + end_slot: Slot, + ) -> Result { + if column != DBColumn::BeaconBlockRoots && column != DBColumn::BeaconStateRoots { + return Err(Error::ForwardsIterInvalidColumn(column)); + } + let start = start_slot.as_u64().to_be_bytes(); + Ok(Self { + inner: store.cold_db.iter_column_from(column, &start), + column, + next_slot: start_slot, + end_slot, + _phantom: PhantomData, + }) + } +} + +impl, Cold: ItemStore> Iterator + for FrozenForwardsIterator<'_, E, Hot, Cold> +{ + type Item = Result<(Hash256, Slot)>; fn next(&mut self) -> Option { + if self.next_slot == self.end_slot { + return None; + } self.inner - .next() - .map(|(slot, root)| (root, Slot::from(slot))) + .next()? 
+ .and_then(|(slot_bytes, root_bytes)| { + let slot = slot_bytes + .clone() + .try_into() + .map(u64::from_be_bytes) + .map(Slot::new) + .map_err(|_| Error::InvalidBytes)?; + if root_bytes.len() != std::mem::size_of::() { + return Err(Error::InvalidBytes); + } + let root = Hash256::from_slice(&root_bytes); + + if slot != self.next_slot { + return Err(Error::ForwardsIterGap(self.column, slot, self.next_slot)); + } + self.next_slot += 1; + + Ok(Some((root, slot))) + }) + .transpose() } } @@ -139,10 +199,12 @@ impl Iterator for SimpleForwardsIterator { } /// Fusion of the above two approaches to forwards iteration. Fast and efficient. -pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> { +pub enum HybridForwardsIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> { PreFinalization { - iter: Box>, + iter: Box>, + store: &'a HotColdDB, end_slot: Option, + column: DBColumn, /// Data required by the `PostFinalization` iterator when we get to it. continuation_data: Option, Hash256)>>, }, @@ -150,6 +212,7 @@ pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, C continuation_data: Option, Hash256)>>, store: &'a HotColdDB, start_slot: Slot, + column: DBColumn, }, PostFinalization { iter: SimpleForwardsIterator, @@ -157,8 +220,8 @@ pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, C Finished, } -impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> - HybridForwardsIterator<'a, E, F, Hot, Cold> +impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> + HybridForwardsIterator<'a, E, Hot, Cold> { /// Construct a new hybrid iterator. /// @@ -174,48 +237,54 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> /// function may block for some time while `get_state` runs. 
pub fn new( store: &'a HotColdDB, + column: DBColumn, start_slot: Slot, end_slot: Option, get_state: impl FnOnce() -> Result<(BeaconState, Hash256)>, - spec: &ChainSpec, ) -> Result { use HybridForwardsIterator::*; // First slot at which this field is *not* available in the freezer. i.e. all slots less // than this slot have their data available in the freezer. - let freezer_upper_limit = F::freezer_upper_limit(store).unwrap_or(Slot::new(0)); + let opt_freezer_upper_bound = store.freezer_upper_bound_for_column(column, start_slot)?; - let result = if start_slot < freezer_upper_limit { - let iter = Box::new(FrozenForwardsIterator::new( - store, - start_slot, - freezer_upper_limit, - spec, - )); + match opt_freezer_upper_bound { + Some(freezer_upper_bound) if start_slot < freezer_upper_bound => { + // EXCLUSIVE end slot for the frozen portion of the iterator. + let frozen_end_slot = end_slot.map_or(freezer_upper_bound, |end_slot| { + std::cmp::min(end_slot + 1, freezer_upper_bound) + }); + let iter = Box::new(FrozenForwardsIterator::new( + store, + column, + start_slot, + frozen_end_slot, + )?); - // No continuation data is needed if the forwards iterator plans to halt before - // `end_slot`. If it tries to continue further a `NoContinuationData` error will be - // returned. - let continuation_data = - if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_limit) { - None - } else { - Some(Box::new(get_state()?)) - }; - PreFinalization { - iter, - end_slot, - continuation_data, + // No continuation data is needed if the forwards iterator plans to halt before + // `end_slot`. If it tries to continue further a `NoContinuationData` error will be + // returned. 
+ let continuation_data = + if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_bound) { + None + } else { + Some(Box::new(get_state()?)) + }; + Ok(PreFinalization { + iter, + store, + end_slot, + column, + continuation_data, + }) } - } else { - PostFinalizationLazy { + _ => Ok(PostFinalizationLazy { continuation_data: Some(Box::new(get_state()?)), store, start_slot, - } - }; - - Ok(result) + column, + }), + } } fn do_next(&mut self) -> Result> { @@ -225,29 +294,31 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> PreFinalization { iter, end_slot, + store, continuation_data, + column, } => { match iter.next() { - Some(x) => Ok(Some(x)), + Some(x) => x.map(Some), // Once the pre-finalization iterator is consumed, transition // to a post-finalization iterator beginning from the last slot // of the pre iterator. None => { // If the iterator has an end slot (inclusive) which has already been // covered by the (exclusive) frozen forwards iterator, then we're done! - let iter_end_slot = Slot::from(iter.inner.end_vindex); - if end_slot.map_or(false, |end_slot| iter_end_slot == end_slot + 1) { + if end_slot.map_or(false, |end_slot| iter.end_slot == end_slot + 1) { *self = Finished; return Ok(None); } let continuation_data = continuation_data.take(); - let store = iter.inner.store; - let start_slot = iter_end_slot; + let start_slot = iter.end_slot; + *self = PostFinalizationLazy { continuation_data, store, start_slot, + column: *column, }; self.do_next() @@ -258,11 +329,17 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> continuation_data, store, start_slot, + column, } => { let (end_state, end_root) = *continuation_data.take().ok_or(Error::NoContinuationData)?; *self = PostFinalization { - iter: F::simple_forwards_iterator(store, *start_slot, end_state, end_root)?, + iter: store.simple_forwards_iterator( + *column, + *start_slot, + end_state, + end_root, + )?, }; self.do_next() } @@ -272,8 +349,8 @@ impl<'a, E: EthSpec, F: Root, 
Hot: ItemStore, Cold: ItemStore> } } -impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> Iterator - for HybridForwardsIterator<'a, E, F, Hot, Cold> +impl, Cold: ItemStore> Iterator + for HybridForwardsIterator<'_, E, Hot, Cold> { type Item = Result<(Hash256, Slot)>; diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs new file mode 100644 index 0000000000..a29e680eb5 --- /dev/null +++ b/beacon_node/store/src/hdiff.rs @@ -0,0 +1,914 @@ +//! Hierarchical diff implementation. +use crate::{metrics, DBColumn, StoreConfig, StoreItem}; +use bls::PublicKeyBytes; +use itertools::Itertools; +use serde::{Deserialize, Serialize}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::cmp::Ordering; +use std::io::{Read, Write}; +use std::ops::RangeInclusive; +use std::str::FromStr; +use std::sync::LazyLock; +use superstruct::superstruct; +use types::historical_summary::HistoricalSummary; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, List, Slot, Validator}; +use zstd::{Decoder, Encoder}; + +static EMPTY_PUBKEY: LazyLock = LazyLock::new(PublicKeyBytes::empty); + +#[derive(Debug)] +pub enum Error { + InvalidHierarchy, + DiffDeletionsNotSupported, + UnableToComputeDiff, + UnableToApplyDiff, + BalancesIncompleteChunk, + Compression(std::io::Error), + InvalidSszState(ssz::DecodeError), + InvalidBalancesLength, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Encode, Decode)] +pub struct HierarchyConfig { + /// A sequence of powers of two to define how frequently to store each layer of state diffs. + /// The last value always represents the frequency of full state snapshots. Adding more + /// exponents increases the number of diff layers. This value allows to customize the trade-off + /// between reconstruction speed and disk space. + /// + /// Consider an example `exponents value of `[5,13,21]`. 
This means we have 3 layers: + /// - Full state stored every 2^21 slots (2097152 slots or 291 days) + /// - First diff layer stored every 2^13 slots (8192 slots or 2.3 hours) + /// - Second diff layer stored every 2^5 slots (32 slots or 1 epoch) + /// + /// To reconstruct a state at slot 3,000,003 we load each closest layer + /// - Layer 0: 3000003 - (3000003 mod 2^21) = 2097152 + /// - Layer 1: 3000003 - (3000003 mod 2^13) = 2998272 + /// - Layer 2: 3000003 - (3000003 mod 2^5) = 3000000 + /// + /// Layer 0 is full state snapshot, apply layer 1 diff, then apply layer 2 diff and then replay + /// blocks 3,000,001 to 3,000,003. + pub exponents: Vec, +} + +impl FromStr for HierarchyConfig { + type Err = String; + + fn from_str(s: &str) -> Result { + let exponents = s + .split(',') + .map(|s| { + s.parse() + .map_err(|e| format!("invalid hierarchy-exponents: {e:?}")) + }) + .collect::, _>>()?; + + if exponents.windows(2).any(|w| w[0] >= w[1]) { + return Err("hierarchy-exponents must be in ascending order".to_string()); + } + + Ok(HierarchyConfig { exponents }) + } +} + +impl std::fmt::Display for HierarchyConfig { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.exponents.iter().join(",")) + } +} + +#[derive(Debug)] +pub struct HierarchyModuli { + moduli: Vec, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum StorageStrategy { + ReplayFrom(Slot), + DiffFrom(Slot), + Snapshot, +} + +/// Hierarchical diff output and working buffer. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct HDiffBuffer { + state: Vec, + balances: Vec, + inactivity_scores: Vec, + validators: Vec, + historical_roots: Vec, + historical_summaries: Vec, +} + +/// Hierarchical state diff. +/// +/// Splits the diff into two data sections: +/// +/// - **balances**: The balance of each active validator is almost certain to change every epoch. +/// So this is the field in the state with most entropy. However the balance changes are small. 
+/// We can optimize the diff significantly by computing the balance difference first and then +/// compressing the result to squash those leading zero bytes. +/// +/// - **everything else**: Instead of trying to apply heuristics and be clever on each field, +/// running a generic binary diff algorithm on the rest of fields yields very good results. With +/// this strategy the HDiff code is easily mantainable across forks, as new fields are covered +/// automatically. xdelta3 algorithm showed diff compute and apply times of ~200 ms on a mainnet +/// state from Apr 2023 (570k indexes), and a 92kB diff size. +#[superstruct( + variants(V0), + variant_attributes(derive(Debug, PartialEq, Encode, Decode)) +)] +#[derive(Debug, PartialEq, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +pub struct HDiff { + state_diff: BytesDiff, + balances_diff: CompressedU64Diff, + /// inactivity_scores are small integers that change slowly epoch to epoch. And are 0 for all + /// participants unless there's non-finality. Computing the diff and compressing the result is + /// much faster than running them through a binary patch algorithm. In the default case where + /// all values are 0 it should also result in a tiny output. + inactivity_scores_diff: CompressedU64Diff, + /// The validators array represents the vast majority of data in a BeaconState. Due to its big + /// size we have seen the performance of xdelta3 degrade. Comparing each entry of the + /// validators array manually significantly speeds up the computation of the diff (+10x faster) + /// and result in the same minimal diff. As the `Validator` record is unlikely to change, + /// maintaining this extra complexity should be okay. + validators_diff: ValidatorsDiff, + /// `historical_roots` is an unbounded forever growing (after Capella it's + /// historical_summaries) list of unique roots. This data is pure entropy so there's no point + /// in compressing it. 
As it's an append only list, the optimal diff + compression is just the + /// list of new entries. The size of `historical_roots` and `historical_summaries` in + /// non-trivial ~10 MB so throwing it to xdelta3 adds CPU cycles. With a bit of extra complexity + /// we can save those completely. + historical_roots: AppendOnlyDiff, + /// See historical_roots + historical_summaries: AppendOnlyDiff, +} + +#[derive(Debug, PartialEq, Encode, Decode)] +pub struct BytesDiff { + bytes: Vec, +} + +#[derive(Debug, PartialEq, Encode, Decode)] +pub struct CompressedU64Diff { + bytes: Vec, +} + +#[derive(Debug, PartialEq, Encode, Decode)] +pub struct ValidatorsDiff { + bytes: Vec, +} + +#[derive(Debug, PartialEq, Encode, Decode)] +pub struct AppendOnlyDiff { + values: Vec, +} + +impl HDiffBuffer { + pub fn from_state(mut beacon_state: BeaconState) -> Self { + let _t = metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_FROM_STATE_TIME); + // Set state.balances to empty list, and then serialize state as ssz + let balances_list = std::mem::take(beacon_state.balances_mut()); + let inactivity_scores = if let Ok(inactivity_scores) = beacon_state.inactivity_scores_mut() + { + std::mem::take(inactivity_scores).to_vec() + } else { + // If this state is pre-altair consider the list empty. If the target state + // is post altair, all its items will show up in the diff as is. + vec![] + }; + let validators = std::mem::take(beacon_state.validators_mut()).to_vec(); + let historical_roots = std::mem::take(beacon_state.historical_roots_mut()).to_vec(); + let historical_summaries = + if let Ok(historical_summaries) = beacon_state.historical_summaries_mut() { + std::mem::take(historical_summaries).to_vec() + } else { + // If this state is pre-capella consider the list empty. The diff will + // include all items in the target state. If both states are + // pre-capella the diff will be empty. 
+ vec![] + }; + + let state = beacon_state.as_ssz_bytes(); + let balances = balances_list.to_vec(); + + HDiffBuffer { + state, + balances, + inactivity_scores, + validators, + historical_roots, + historical_summaries, + } + } + + pub fn as_state(&self, spec: &ChainSpec) -> Result, Error> { + let _t = metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_INTO_STATE_TIME); + let mut state = + BeaconState::from_ssz_bytes(&self.state, spec).map_err(Error::InvalidSszState)?; + + *state.balances_mut() = List::try_from_iter(self.balances.iter().copied()) + .map_err(|_| Error::InvalidBalancesLength)?; + + if let Ok(inactivity_scores) = state.inactivity_scores_mut() { + *inactivity_scores = List::try_from_iter(self.inactivity_scores.iter().copied()) + .map_err(|_| Error::InvalidBalancesLength)?; + } + + *state.validators_mut() = List::try_from_iter(self.validators.iter().cloned()) + .map_err(|_| Error::InvalidBalancesLength)?; + + *state.historical_roots_mut() = List::try_from_iter(self.historical_roots.iter().copied()) + .map_err(|_| Error::InvalidBalancesLength)?; + + if let Ok(historical_summaries) = state.historical_summaries_mut() { + *historical_summaries = List::try_from_iter(self.historical_summaries.iter().copied()) + .map_err(|_| Error::InvalidBalancesLength)?; + } + + Ok(state) + } + + /// Byte size of this instance + pub fn size(&self) -> usize { + self.state.len() + + self.balances.len() * std::mem::size_of::() + + self.inactivity_scores.len() * std::mem::size_of::() + + self.validators.len() * std::mem::size_of::() + + self.historical_roots.len() * std::mem::size_of::() + + self.historical_summaries.len() * std::mem::size_of::() + } +} + +impl HDiff { + pub fn compute( + source: &HDiffBuffer, + target: &HDiffBuffer, + config: &StoreConfig, + ) -> Result { + let state_diff = BytesDiff::compute(&source.state, &target.state)?; + let balances_diff = CompressedU64Diff::compute(&source.balances, &target.balances, config)?; + let inactivity_scores_diff = 
CompressedU64Diff::compute( + &source.inactivity_scores, + &target.inactivity_scores, + config, + )?; + let validators_diff = + ValidatorsDiff::compute(&source.validators, &target.validators, config)?; + let historical_roots = + AppendOnlyDiff::compute(&source.historical_roots, &target.historical_roots)?; + let historical_summaries = + AppendOnlyDiff::compute(&source.historical_summaries, &target.historical_summaries)?; + + Ok(HDiff::V0(HDiffV0 { + state_diff, + balances_diff, + inactivity_scores_diff, + validators_diff, + historical_roots, + historical_summaries, + })) + } + + pub fn apply(&self, source: &mut HDiffBuffer, config: &StoreConfig) -> Result<(), Error> { + let source_state = std::mem::take(&mut source.state); + self.state_diff().apply(&source_state, &mut source.state)?; + self.balances_diff().apply(&mut source.balances, config)?; + self.inactivity_scores_diff() + .apply(&mut source.inactivity_scores, config)?; + self.validators_diff() + .apply(&mut source.validators, config)?; + self.historical_roots().apply(&mut source.historical_roots); + self.historical_summaries() + .apply(&mut source.historical_summaries); + + Ok(()) + } + + /// Byte size of this instance + pub fn size(&self) -> usize { + self.sizes().iter().sum() + } + + pub fn sizes(&self) -> Vec { + vec![ + self.state_diff().size(), + self.balances_diff().size(), + self.inactivity_scores_diff().size(), + self.validators_diff().size(), + self.historical_roots().size(), + self.historical_summaries().size(), + ] + } +} + +impl StoreItem for HDiff { + fn db_column() -> DBColumn { + DBColumn::BeaconStateDiff + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) 
+ } +} + +impl BytesDiff { + pub fn compute(source: &[u8], target: &[u8]) -> Result { + Self::compute_xdelta(source, target) + } + + pub fn compute_xdelta(source_bytes: &[u8], target_bytes: &[u8]) -> Result { + let bytes = xdelta3::encode(target_bytes, source_bytes) + .ok_or(Error::UnableToComputeDiff) + .unwrap(); + Ok(Self { bytes }) + } + + pub fn apply(&self, source: &[u8], target: &mut Vec) -> Result<(), Error> { + self.apply_xdelta(source, target) + } + + pub fn apply_xdelta(&self, source: &[u8], target: &mut Vec) -> Result<(), Error> { + *target = xdelta3::decode(&self.bytes, source).ok_or(Error::UnableToApplyDiff)?; + Ok(()) + } + + /// Byte size of this instance + pub fn size(&self) -> usize { + self.bytes.len() + } +} + +impl CompressedU64Diff { + pub fn compute(xs: &[u64], ys: &[u64], config: &StoreConfig) -> Result { + if xs.len() > ys.len() { + return Err(Error::DiffDeletionsNotSupported); + } + + let uncompressed_bytes: Vec = ys + .iter() + .enumerate() + .flat_map(|(i, y)| { + // Diff from 0 if the entry is new. + let x = xs.get(i).copied().unwrap_or(0); + y.wrapping_sub(x).to_be_bytes() + }) + .collect(); + + Ok(CompressedU64Diff { + bytes: compress_bytes(&uncompressed_bytes, config)?, + }) + } + + pub fn apply(&self, xs: &mut Vec, config: &StoreConfig) -> Result<(), Error> { + // Decompress balances diff. 
+ let balances_diff_bytes = uncompress_bytes(&self.bytes, config)?; + + for (i, diff_bytes) in balances_diff_bytes + .chunks(u64::BITS as usize / 8) + .enumerate() + { + let diff = diff_bytes + .try_into() + .map(u64::from_be_bytes) + .map_err(|_| Error::BalancesIncompleteChunk)?; + + if let Some(x) = xs.get_mut(i) { + *x = x.wrapping_add(diff); + } else { + xs.push(diff); + } + } + + Ok(()) + } + + /// Byte size of this instance + pub fn size(&self) -> usize { + self.bytes.len() + } +} + +fn compress_bytes(input: &[u8], config: &StoreConfig) -> Result, Error> { + let compression_level = config.compression_level; + let mut out = Vec::with_capacity(config.estimate_compressed_size(input.len())); + let mut encoder = Encoder::new(&mut out, compression_level).map_err(Error::Compression)?; + encoder.write_all(input).map_err(Error::Compression)?; + encoder.finish().map_err(Error::Compression)?; + Ok(out) +} + +fn uncompress_bytes(input: &[u8], config: &StoreConfig) -> Result, Error> { + let mut out = Vec::with_capacity(config.estimate_decompressed_size(input.len())); + let mut decoder = Decoder::new(input).map_err(Error::Compression)?; + decoder.read_to_end(&mut out).map_err(Error::Compression)?; + Ok(out) +} + +impl ValidatorsDiff { + pub fn compute( + xs: &[Validator], + ys: &[Validator], + config: &StoreConfig, + ) -> Result { + if xs.len() > ys.len() { + return Err(Error::DiffDeletionsNotSupported); + } + + let uncompressed_bytes = ys + .iter() + .enumerate() + .filter_map(|(i, y)| { + let validator_diff = if let Some(x) = xs.get(i) { + if y == x { + return None; + } else { + let pubkey_changed = y.pubkey != x.pubkey; + // Note: If researchers attempt to change the Validator container, go quickly to + // All Core Devs and push hard to add another List in the BeaconState instead. 
+ Validator { + // The pubkey can be changed on index re-use + pubkey: if pubkey_changed { + y.pubkey + } else { + PublicKeyBytes::empty() + }, + // withdrawal_credentials can be set to zero initially but can never be + // changed INTO zero. On index re-use it can be set to zero, but in that + // case the pubkey will also change. + withdrawal_credentials: if pubkey_changed + || y.withdrawal_credentials != x.withdrawal_credentials + { + y.withdrawal_credentials + } else { + Hash256::ZERO + }, + // effective_balance can increase and decrease + effective_balance: y.effective_balance - x.effective_balance, + // slashed can only change from false into true. In an index re-use it can + // switch back to false, but in that case the pubkey will also change. + slashed: y.slashed, + // activation_eligibility_epoch can never be zero under any case. It's + // set to either FAR_FUTURE_EPOCH or get_current_epoch(state) + 1 + activation_eligibility_epoch: if y.activation_eligibility_epoch + != x.activation_eligibility_epoch + { + y.activation_eligibility_epoch + } else { + Epoch::new(0) + }, + // activation_epoch can never be zero under any case. It's + // set to either FAR_FUTURE_EPOCH or epoch + 1 + MAX_SEED_LOOKAHEAD + activation_epoch: if y.activation_epoch != x.activation_epoch { + y.activation_epoch + } else { + Epoch::new(0) + }, + // exit_epoch can never be zero under any case. It's set to either + // FAR_FUTURE_EPOCH or > epoch + 1 + MAX_SEED_LOOKAHEAD + exit_epoch: if y.exit_epoch != x.exit_epoch { + y.exit_epoch + } else { + Epoch::new(0) + }, + // withdrawable_epoch can never be zero under any case. 
It's set to + // either FAR_FUTURE_EPOCH or > epoch + 1 + MAX_SEED_LOOKAHEAD + withdrawable_epoch: if y.withdrawable_epoch != x.withdrawable_epoch { + y.withdrawable_epoch + } else { + Epoch::new(0) + }, + } + } + } else { + y.clone() + }; + + Some(ValidatorDiffEntry { + index: i as u64, + validator_diff, + }) + }) + .flat_map(|v_diff| v_diff.as_ssz_bytes()) + .collect::>(); + + Ok(Self { + bytes: compress_bytes(&uncompressed_bytes, config)?, + }) + } + + pub fn apply(&self, xs: &mut Vec, config: &StoreConfig) -> Result<(), Error> { + let validator_diff_bytes = uncompress_bytes(&self.bytes, config)?; + + for diff_bytes in + validator_diff_bytes.chunks(::ssz_fixed_len()) + { + let ValidatorDiffEntry { + index, + validator_diff: diff, + } = ValidatorDiffEntry::from_ssz_bytes(diff_bytes) + .map_err(|_| Error::BalancesIncompleteChunk)?; + + if let Some(x) = xs.get_mut(index as usize) { + // Note: a pubkey change implies index re-use. In that case over-write + // withdrawal_credentials and slashed unconditionally as their default values + // are valid values. 
+ let pubkey_changed = diff.pubkey != *EMPTY_PUBKEY; + if pubkey_changed { + x.pubkey = diff.pubkey; + } + if pubkey_changed || diff.withdrawal_credentials != Hash256::ZERO { + x.withdrawal_credentials = diff.withdrawal_credentials; + } + if diff.effective_balance != 0 { + x.effective_balance = x.effective_balance.wrapping_add(diff.effective_balance); + } + if pubkey_changed || diff.slashed { + x.slashed = diff.slashed; + } + if diff.activation_eligibility_epoch != Epoch::new(0) { + x.activation_eligibility_epoch = diff.activation_eligibility_epoch; + } + if diff.activation_epoch != Epoch::new(0) { + x.activation_epoch = diff.activation_epoch; + } + if diff.exit_epoch != Epoch::new(0) { + x.exit_epoch = diff.exit_epoch; + } + if diff.withdrawable_epoch != Epoch::new(0) { + x.withdrawable_epoch = diff.withdrawable_epoch; + } + } else { + xs.push(diff) + } + } + + Ok(()) + } + + /// Byte size of this instance + pub fn size(&self) -> usize { + self.bytes.len() + } +} + +#[derive(Debug, Encode, Decode)] +struct ValidatorDiffEntry { + index: u64, + validator_diff: Validator, +} + +impl AppendOnlyDiff { + pub fn compute(xs: &[T], ys: &[T]) -> Result { + match xs.len().cmp(&ys.len()) { + Ordering::Less => Ok(Self { + values: ys.iter().skip(xs.len()).copied().collect(), + }), + // Don't even create an iterator for this common case + Ordering::Equal => Ok(Self { values: vec![] }), + Ordering::Greater => Err(Error::DiffDeletionsNotSupported), + } + } + + pub fn apply(&self, xs: &mut Vec) { + xs.extend(self.values.iter().copied()); + } + + /// Byte size of this instance + pub fn size(&self) -> usize { + self.values.len() * size_of::() + } +} + +impl Default for HierarchyConfig { + fn default() -> Self { + HierarchyConfig { + exponents: vec![5, 9, 11, 13, 16, 18, 21], + } + } +} + +impl HierarchyConfig { + pub fn to_moduli(&self) -> Result { + self.validate()?; + let moduli = self.exponents.iter().map(|n| 1 << n).collect(); + Ok(HierarchyModuli { moduli }) + } + + pub fn 
validate(&self) -> Result<(), Error> { + if !self.exponents.is_empty() + && self + .exponents + .iter() + .tuple_windows() + .all(|(small, big)| small < big && *big < u64::BITS as u8) + { + Ok(()) + } else { + Err(Error::InvalidHierarchy) + } + } +} + +impl HierarchyModuli { + pub fn storage_strategy(&self, slot: Slot) -> Result { + // last = full snapshot interval + let last = self.moduli.last().copied().ok_or(Error::InvalidHierarchy)?; + // first = most frequent diff layer, need to replay blocks from this layer + let first = self + .moduli + .first() + .copied() + .ok_or(Error::InvalidHierarchy)?; + + if slot % last == 0 { + return Ok(StorageStrategy::Snapshot); + } + + Ok(self + .moduli + .iter() + .rev() + .tuple_windows() + .find_map(|(&n_big, &n_small)| { + if slot % n_small == 0 { + // Diff from the previous layer. + Some(StorageStrategy::DiffFrom(slot / n_big * n_big)) + } else { + // Keep trying with next layer + None + } + }) + // Exhausted layers, need to replay from most frequent layer + .unwrap_or(StorageStrategy::ReplayFrom(slot / first * first))) + } + + /// Return the smallest slot greater than or equal to `slot` at which a full snapshot should + /// be stored. + pub fn next_snapshot_slot(&self, slot: Slot) -> Result { + let last = self.moduli.last().copied().ok_or(Error::InvalidHierarchy)?; + if slot % last == 0 { + Ok(slot) + } else { + Ok((slot / last + 1) * last) + } + } + + /// Return `true` if the database ops for this slot should be committed immediately. + /// + /// This is the case for all diffs aside from the ones in the leaf layer. To store a diff + /// might require loading the state at the previous layer, in which case the diff for that + /// layer must already have been stored. + /// + /// In future we may be able to handle this differently (with proper transaction semantics + /// rather than LevelDB's "write batches"). 
+ pub fn should_commit_immediately(&self, slot: Slot) -> Result { + // If there's only 1 layer of snapshots, then commit only when writing a snapshot. + self.moduli.get(1).map_or_else( + || Ok(slot == self.next_snapshot_slot(slot)?), + |second_layer_moduli| Ok(slot % *second_layer_moduli == 0), + ) + } +} + +impl StorageStrategy { + /// For the state stored with this `StorageStrategy` at `slot`, return the range of slots which + /// should be checked for ancestor states in the historic state cache. + /// + /// The idea is that for states which need to be built by replaying blocks we should scan + /// for any viable ancestor state between their `from` slot and `slot`. If we find such a + /// state it will save us from the slow reconstruction of the `from` state using diffs. + /// + /// Similarly for `DiffFrom` and `Snapshot` states, loading the prior state and replaying 1 + /// block is often going to be faster than loading and applying diffs/snapshots, so we may as + /// well check the cache for that 1 slot prior (in case the caller is iterating sequentially). + pub fn replay_from_range( + &self, + slot: Slot, + ) -> std::iter::Map, fn(u64) -> Slot> { + match self { + Self::ReplayFrom(from) => from.as_u64()..=slot.as_u64(), + Self::Snapshot | Self::DiffFrom(_) => { + if slot > 0 { + (slot - 1).as_u64()..=slot.as_u64() + } else { + slot.as_u64()..=slot.as_u64() + } + } + } + .map(Slot::from) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::{rngs::SmallRng, thread_rng, Rng, SeedableRng}; + + #[test] + fn default_storage_strategy() { + let config = HierarchyConfig::default(); + config.validate().unwrap(); + + let moduli = config.to_moduli().unwrap(); + + // Full snapshots at multiples of 2^21. 
+ let snapshot_freq = Slot::new(1 << 21); + assert_eq!( + moduli.storage_strategy(Slot::new(0)).unwrap(), + StorageStrategy::Snapshot + ); + assert_eq!( + moduli.storage_strategy(snapshot_freq).unwrap(), + StorageStrategy::Snapshot + ); + assert_eq!( + moduli.storage_strategy(snapshot_freq * 3).unwrap(), + StorageStrategy::Snapshot + ); + + // Diffs should be from the previous layer (the snapshot in this case), and not the previous diff in the same layer. + let first_layer = Slot::new(1 << 18); + assert_eq!( + moduli.storage_strategy(first_layer * 2).unwrap(), + StorageStrategy::DiffFrom(Slot::new(0)) + ); + + let replay_strategy_slot = first_layer + 1; + assert_eq!( + moduli.storage_strategy(replay_strategy_slot).unwrap(), + StorageStrategy::ReplayFrom(first_layer) + ); + } + + #[test] + fn next_snapshot_slot() { + let config = HierarchyConfig::default(); + config.validate().unwrap(); + + let moduli = config.to_moduli().unwrap(); + let snapshot_freq = Slot::new(1 << 21); + + assert_eq!( + moduli.next_snapshot_slot(snapshot_freq).unwrap(), + snapshot_freq + ); + assert_eq!( + moduli.next_snapshot_slot(snapshot_freq + 1).unwrap(), + snapshot_freq * 2 + ); + assert_eq!( + moduli.next_snapshot_slot(snapshot_freq * 2 - 1).unwrap(), + snapshot_freq * 2 + ); + assert_eq!( + moduli.next_snapshot_slot(snapshot_freq * 2).unwrap(), + snapshot_freq * 2 + ); + assert_eq!( + moduli.next_snapshot_slot(snapshot_freq * 100).unwrap(), + snapshot_freq * 100 + ); + } + + #[test] + fn compressed_u64_vs_bytes_diff() { + let x_values = vec![99u64, 55, 123, 6834857, 0, 12]; + let y_values = vec![98u64, 55, 312, 1, 1, 2, 4, 5]; + let config = &StoreConfig::default(); + + let to_bytes = + |nums: &[u64]| -> Vec { nums.iter().flat_map(|x| x.to_be_bytes()).collect() }; + + let x_bytes = to_bytes(&x_values); + let y_bytes = to_bytes(&y_values); + + let u64_diff = CompressedU64Diff::compute(&x_values, &y_values, config).unwrap(); + + let mut y_from_u64_diff = x_values; + u64_diff.apply(&mut 
y_from_u64_diff, config).unwrap(); + + assert_eq!(y_values, y_from_u64_diff); + + let bytes_diff = BytesDiff::compute(&x_bytes, &y_bytes).unwrap(); + + let mut y_from_bytes = vec![]; + bytes_diff.apply(&x_bytes, &mut y_from_bytes).unwrap(); + + assert_eq!(y_bytes, y_from_bytes); + + // U64 diff wins by more than a factor of 3 + assert!(u64_diff.bytes.len() < 3 * bytes_diff.bytes.len()); + } + + #[test] + fn compressed_validators_diff() { + assert_eq!(::ssz_fixed_len(), 129); + + let mut rng = thread_rng(); + let config = &StoreConfig::default(); + let xs = (0..10) + .map(|_| rand_validator(&mut rng)) + .collect::>(); + let mut ys = xs.clone(); + ys[5] = rand_validator(&mut rng); + ys.push(rand_validator(&mut rng)); + let diff = ValidatorsDiff::compute(&xs, &ys, config).unwrap(); + + let mut xs_out = xs.clone(); + diff.apply(&mut xs_out, config).unwrap(); + assert_eq!(xs_out, ys); + } + + fn rand_validator(mut rng: impl Rng) -> Validator { + let mut pubkey = [0u8; 48]; + rng.fill_bytes(&mut pubkey); + let withdrawal_credentials: [u8; 32] = rng.gen(); + + Validator { + pubkey: PublicKeyBytes::from_ssz_bytes(&pubkey).unwrap(), + withdrawal_credentials: withdrawal_credentials.into(), + slashed: false, + effective_balance: 32_000_000_000, + activation_eligibility_epoch: Epoch::max_value(), + activation_epoch: Epoch::max_value(), + exit_epoch: Epoch::max_value(), + withdrawable_epoch: Epoch::max_value(), + } + } + + // This test checks that the hdiff algorithm doesn't accidentally change between releases. + // If it does, we need to ensure appropriate backwards compatibility measures are implemented + // before this test is updated. 
+ #[test] + fn hdiff_version_stability() { + let mut rng = SmallRng::seed_from_u64(0xffeeccdd00aa); + + let pre_balances = vec![32_000_000_000, 16_000_000_000, 0]; + let post_balances = vec![31_000_000_000, 17_000_000, 0, 0]; + + let pre_inactivity_scores = vec![1, 1, 1]; + let post_inactivity_scores = vec![0, 0, 0, 1]; + + let pre_validators = (0..3).map(|_| rand_validator(&mut rng)).collect::>(); + let post_validators = pre_validators.clone(); + + let pre_historical_roots = vec![Hash256::repeat_byte(0xff)]; + let post_historical_roots = vec![Hash256::repeat_byte(0xff), Hash256::repeat_byte(0xee)]; + + let pre_historical_summaries = vec![HistoricalSummary::default()]; + let post_historical_summaries = pre_historical_summaries.clone(); + + let pre_buffer = HDiffBuffer { + state: vec![0, 1, 2, 3, 3, 2, 1, 0], + balances: pre_balances, + inactivity_scores: pre_inactivity_scores, + validators: pre_validators, + historical_roots: pre_historical_roots, + historical_summaries: pre_historical_summaries, + }; + let post_buffer = HDiffBuffer { + state: vec![0, 1, 3, 2, 2, 3, 1, 1], + balances: post_balances, + inactivity_scores: post_inactivity_scores, + validators: post_validators, + historical_roots: post_historical_roots, + historical_summaries: post_historical_summaries, + }; + + let config = StoreConfig::default(); + let hdiff = HDiff::compute(&pre_buffer, &post_buffer, &config).unwrap(); + let hdiff_ssz = hdiff.as_ssz_bytes(); + + // First byte should match enum version. + assert_eq!(hdiff_ssz[0], 0); + + // Should roundtrip. + assert_eq!(HDiff::from_ssz_bytes(&hdiff_ssz).unwrap(), hdiff); + + // Should roundtrip as V0 with enum selector stripped. 
+ assert_eq!( + HDiff::V0(HDiffV0::from_ssz_bytes(&hdiff_ssz[1..]).unwrap()), + hdiff + ); + + assert_eq!( + hdiff_ssz, + vec![ + 0u8, 24, 0, 0, 0, 49, 0, 0, 0, 85, 0, 0, 0, 114, 0, 0, 0, 127, 0, 0, 0, 163, 0, 0, + 0, 4, 0, 0, 0, 214, 195, 196, 0, 0, 0, 14, 8, 0, 8, 1, 0, 0, 1, 3, 2, 2, 3, 1, 1, + 9, 4, 0, 0, 0, 40, 181, 47, 253, 0, 72, 189, 0, 0, 136, 255, 255, 255, 255, 196, + 101, 54, 0, 255, 255, 255, 252, 71, 86, 198, 64, 0, 1, 0, 59, 176, 4, 4, 0, 0, 0, + 40, 181, 47, 253, 0, 72, 133, 0, 0, 80, 255, 255, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 10, + 192, 2, 4, 0, 0, 0, 40, 181, 47, 253, 32, 0, 1, 0, 0, 4, 0, 0, 0, 238, 238, 238, + 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, + 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 4, 0, 0, 0 + ] + ); + } +} diff --git a/beacon_node/store/src/historic_state_cache.rs b/beacon_node/store/src/historic_state_cache.rs new file mode 100644 index 0000000000..c0e8f8346c --- /dev/null +++ b/beacon_node/store/src/historic_state_cache.rs @@ -0,0 +1,92 @@ +use crate::hdiff::{Error, HDiffBuffer}; +use crate::metrics; +use lru::LruCache; +use std::num::NonZeroUsize; +use types::{BeaconState, ChainSpec, EthSpec, Slot}; + +/// Holds a combination of finalized states in two formats: +/// - `hdiff_buffers`: Format close to an SSZ serialized state for rapid application of diffs on top +/// of it +/// - `states`: Deserialized states for direct use or for rapid application of blocks (replay) +/// +/// An example use: when requesting state data for consecutive slots, this cache allows the node to +/// apply diffs once on the first request, and latter just apply blocks one at a time. 
+#[derive(Debug)] +pub struct HistoricStateCache { + hdiff_buffers: LruCache, + states: LruCache>, +} + +#[derive(Debug, Default)] +pub struct Metrics { + pub num_hdiff: usize, + pub num_state: usize, + pub hdiff_byte_size: usize, +} + +impl HistoricStateCache { + pub fn new(hdiff_buffer_cache_size: NonZeroUsize, state_cache_size: NonZeroUsize) -> Self { + Self { + hdiff_buffers: LruCache::new(hdiff_buffer_cache_size), + states: LruCache::new(state_cache_size), + } + } + + pub fn get_hdiff_buffer(&mut self, slot: Slot) -> Option { + if let Some(buffer_ref) = self.hdiff_buffers.get(&slot) { + let _timer = metrics::start_timer(&metrics::BEACON_HDIFF_BUFFER_CLONE_TIMES); + Some(buffer_ref.clone()) + } else if let Some(state) = self.states.get(&slot) { + let buffer = HDiffBuffer::from_state(state.clone()); + let _timer = metrics::start_timer(&metrics::BEACON_HDIFF_BUFFER_CLONE_TIMES); + let cloned = buffer.clone(); + drop(_timer); + self.hdiff_buffers.put(slot, cloned); + Some(buffer) + } else { + None + } + } + + pub fn get_state( + &mut self, + slot: Slot, + spec: &ChainSpec, + ) -> Result>, Error> { + if let Some(state) = self.states.get(&slot) { + Ok(Some(state.clone())) + } else if let Some(buffer) = self.hdiff_buffers.get(&slot) { + let state = buffer.as_state(spec)?; + self.states.put(slot, state.clone()); + Ok(Some(state)) + } else { + Ok(None) + } + } + + pub fn put_state(&mut self, slot: Slot, state: BeaconState) { + self.states.put(slot, state); + } + + pub fn put_hdiff_buffer(&mut self, slot: Slot, buffer: HDiffBuffer) { + self.hdiff_buffers.put(slot, buffer); + } + + pub fn put_both(&mut self, slot: Slot, state: BeaconState, buffer: HDiffBuffer) { + self.put_state(slot, state); + self.put_hdiff_buffer(slot, buffer); + } + + pub fn metrics(&self) -> Metrics { + let hdiff_byte_size = self + .hdiff_buffers + .iter() + .map(|(_, buffer)| buffer.size()) + .sum::(); + Metrics { + num_hdiff: self.hdiff_buffers.len(), + num_state: self.states.len(), + 
hdiff_byte_size, + } + } +} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 618d85324b..44dbdc8cdc 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1,29 +1,24 @@ -use crate::chunked_vector::{ - store_updated_vector, BlockRoots, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRoots, -}; -use crate::config::{ - OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT, - PREV_DEFAULT_SLOTS_PER_RESTORE_POINT, -}; +use crate::config::{OnDiskStoreConfig, StoreConfig}; use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator}; +use crate::hdiff::{HDiff, HDiffBuffer, HierarchyModuli, StorageStrategy}; +use crate::historic_state_cache::HistoricStateCache; use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::iter::{BlockRootsIterator, ParentRootBlockIterator, RootsIterator}; -use crate::leveldb_store::BytesKey; -use crate::leveldb_store::LevelDB; +use crate::leveldb_store::{BytesKey, LevelDB}; use crate::memory_store::MemoryStore; use crate::metadata::{ AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnInfo, PruningCheckpoint, SchemaVersion, - ANCHOR_INFO_KEY, BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, - DATA_COLUMN_INFO_KEY, PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, - STATE_UPPER_LIMIT_NO_RETAIN, + ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_INFO_KEY, ANCHOR_UNINITIALIZED, BLOB_INFO_KEY, + COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, DATA_COLUMN_INFO_KEY, + PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, - KeyValueStoreOp, PartialBeaconState, StoreItem, StoreOp, + get_data_column_key, get_key_for_col, DBColumn, DatabaseBlock, Error, 
ItemStore, + KeyValueStoreOp, StoreItem, StoreOp, }; use crate::{metrics, parse_data_column_key}; -use itertools::process_results; +use itertools::{process_results, Itertools}; use leveldb::iterator::LevelDBIterator; use lru::LruCache; use parking_lot::{Mutex, RwLock}; @@ -38,14 +33,15 @@ use state_processing::{ }; use std::cmp::min; use std::collections::{HashMap, HashSet}; +use std::io::{Read, Write}; use std::marker::PhantomData; use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; -use types::light_client_update::CurrentSyncCommitteeProofLen; use types::*; +use zstd::{Decoder, Encoder}; /// On-disk database that stores finalized states efficiently. /// @@ -59,12 +55,13 @@ pub struct HotColdDB, Cold: ItemStore> { /// greater than or equal are in the hot DB. pub(crate) split: RwLock, /// The starting slots for the range of blocks & states stored in the database. - anchor_info: RwLock>, + anchor_info: RwLock, /// The starting slots for the range of blobs stored in the database. blob_info: RwLock, /// The starting slots for the range of data columns stored in the database. data_column_info: RwLock, pub(crate) config: StoreConfig, + pub(crate) hierarchy: HierarchyModuli, /// Cold database containing compact historical data. pub cold_db: Cold, /// Database containing blobs. If None, store falls back to use `cold_db`. @@ -79,8 +76,11 @@ pub struct HotColdDB, Cold: ItemStore> { /// /// LOCK ORDERING: this lock must always be locked *after* the `split` if both are required. state_cache: Mutex>, - /// LRU cache of replayed states. - historic_state_cache: Mutex>>, + /// Cache of historic states and hierarchical diff buffers. + /// + /// This cache is never pruned. It is only populated in response to historical queries from the + /// HTTP API. + historic_state_cache: Mutex>, /// Chain spec. pub(crate) spec: Arc, /// Logger. 
@@ -156,22 +156,27 @@ pub enum HotColdDBError { proposed_split_slot: Slot, }, MissingStateToFreeze(Hash256), - MissingRestorePointHash(u64), + MissingRestorePointState(Slot), MissingRestorePoint(Hash256), MissingColdStateSummary(Hash256), MissingHotStateSummary(Hash256), MissingEpochBoundaryState(Hash256), + MissingPrevState(Hash256), MissingSplitState(Hash256, Slot), + MissingStateDiff(Hash256), + MissingHDiff(Slot), MissingExecutionPayload(Hash256), MissingFullBlockExecutionPayloadPruned(Hash256, Slot), MissingAnchorInfo, + MissingFrozenBlockSlot(Hash256), + MissingFrozenBlock(Slot), + MissingPathToBlobsDatabase, BlobsPreviouslyInDefaultStore, HotStateSummaryError(BeaconStateError), RestorePointDecodeError(ssz::DecodeError), BlockReplayBeaconError(BeaconStateError), BlockReplaySlotError(SlotProcessingError), BlockReplayBlockError(BlockProcessingError), - MissingLowerLimitState(Slot), InvalidSlotsPerRestorePoint { slots_per_restore_point: u64, slots_per_historical_root: u64, @@ -197,11 +202,13 @@ impl HotColdDB, MemoryStore> { spec: Arc, log: Logger, ) -> Result, MemoryStore>, Error> { - Self::verify_config(&config)?; + config.verify::()?; + + let hierarchy = config.hierarchy_config.to_moduli()?; let db = HotColdDB { split: RwLock::new(Split::default()), - anchor_info: RwLock::new(None), + anchor_info: RwLock::new(ANCHOR_UNINITIALIZED), blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), cold_db: MemoryStore::open(), @@ -209,8 +216,12 @@ impl HotColdDB, MemoryStore> { hot_db: MemoryStore::open(), block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(StateCache::new(config.state_cache_size)), - historic_state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), + historic_state_cache: Mutex::new(HistoricStateCache::new( + config.hdiff_buffer_cache_size, + config.historic_state_cache_size, + )), config, + hierarchy, spec, log, _phantom: PhantomData, @@ -234,51 
+245,43 @@ impl HotColdDB, LevelDB> { spec: Arc, log: Logger, ) -> Result, Error> { - Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; + config.verify::()?; - let mut db = HotColdDB { + let hierarchy = config.hierarchy_config.to_moduli()?; + + let hot_db = LevelDB::open(hot_path)?; + let anchor_info = RwLock::new(Self::load_anchor_info(&hot_db)?); + + let db = HotColdDB { split: RwLock::new(Split::default()), - anchor_info: RwLock::new(None), + anchor_info, blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), cold_db: LevelDB::open(cold_path)?, blobs_db: LevelDB::open(blobs_db_path)?, - hot_db: LevelDB::open(hot_path)?, + hot_db, block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(StateCache::new(config.state_cache_size)), - historic_state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), + historic_state_cache: Mutex::new(HistoricStateCache::new( + config.hdiff_buffer_cache_size, + config.historic_state_cache_size, + )), config, + hierarchy, spec, log, _phantom: PhantomData, }; - // Allow the slots-per-restore-point value to stay at the previous default if the config - // uses the new default. Don't error on a failed read because the config itself may need - // migrating. - if let Ok(Some(disk_config)) = db.load_config() { - if !db.config.slots_per_restore_point_set_explicitly - && disk_config.slots_per_restore_point == PREV_DEFAULT_SLOTS_PER_RESTORE_POINT - && db.config.slots_per_restore_point == DEFAULT_SLOTS_PER_RESTORE_POINT - { - debug!( - db.log, - "Ignoring slots-per-restore-point config in favour of on-disk value"; - "config" => db.config.slots_per_restore_point, - "on_disk" => disk_config.slots_per_restore_point, - ); - - // Mutate the in-memory config so that it's compatible. 
- db.config.slots_per_restore_point = PREV_DEFAULT_SLOTS_PER_RESTORE_POINT; - } - } + // Load the config from disk but don't error on a failed read because the config itself may + // need migrating. + let _ = db.load_config(); // Load the previous split slot from the database (if any). This ensures we can // stop and restart correctly. This needs to occur *before* running any migrations // because some migrations load states and depend on the split. if let Some(split) = db.load_split()? { *db.split.write() = split; - *db.anchor_info.write() = db.load_anchor_info()?; info!( db.log, @@ -371,7 +374,22 @@ impl HotColdDB, LevelDB> { // Ensure that any on-disk config is compatible with the supplied config. if let Some(disk_config) = db.load_config()? { - db.config.check_compatibility(&disk_config)?; + let split = db.get_split_info(); + let anchor = db.get_anchor_info(); + db.config + .check_compatibility(&disk_config, &split, &anchor)?; + + // Inform user if hierarchy config is changing. + if let Ok(hierarchy_config) = disk_config.hierarchy_config() { + if &db.config.hierarchy_config != hierarchy_config { + info!( + db.log, + "Updating historic state config"; + "previous_config" => %hierarchy_config, + "new_config" => %db.config.hierarchy_config, + ); + } + } } db.store_config()?; @@ -426,6 +444,49 @@ impl, Cold: ItemStore> HotColdDB self.state_cache.lock().len() } + pub fn register_metrics(&self) { + let hsc_metrics = self.historic_state_cache.lock().metrics(); + + metrics::set_gauge( + &metrics::STORE_BEACON_BLOCK_CACHE_SIZE, + self.block_cache.lock().block_cache.len() as i64, + ); + metrics::set_gauge( + &metrics::STORE_BEACON_BLOB_CACHE_SIZE, + self.block_cache.lock().blob_cache.len() as i64, + ); + metrics::set_gauge( + &metrics::STORE_BEACON_STATE_CACHE_SIZE, + self.state_cache.lock().len() as i64, + ); + metrics::set_gauge( + &metrics::STORE_BEACON_HISTORIC_STATE_CACHE_SIZE, + hsc_metrics.num_state as i64, + ); + metrics::set_gauge( + 
&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_SIZE, + hsc_metrics.num_hdiff as i64, + ); + metrics::set_gauge( + &metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_BYTE_SIZE, + hsc_metrics.hdiff_byte_size as i64, + ); + + let anchor_info = self.get_anchor_info(); + metrics::set_gauge( + &metrics::STORE_BEACON_ANCHOR_SLOT, + anchor_info.anchor_slot.as_u64() as i64, + ); + metrics::set_gauge( + &metrics::STORE_BEACON_OLDEST_BLOCK_SLOT, + anchor_info.oldest_block_slot.as_u64() as i64, + ); + metrics::set_gauge( + &metrics::STORE_BEACON_STATE_LOWER_LIMIT, + anchor_info.state_lower_limit.as_u64() as i64, + ); + } + /// Store a block and update the LRU cache. pub fn put_block( &self, @@ -641,15 +702,14 @@ impl, Cold: ItemStore> HotColdDB pub fn get_sync_committee_branch( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result, Error> { let column = DBColumn::SyncCommitteeBranch; if let Some(bytes) = self .hot_db .get_bytes(column.into(), &block_root.as_ssz_bytes())? { - let sync_committee_branch: FixedVector = - FixedVector::from_ssz_bytes(&bytes)?; + let sync_committee_branch = Vec::::from_ssz_bytes(&bytes)?; return Ok(Some(sync_committee_branch)); } @@ -677,7 +737,7 @@ impl, Cold: ItemStore> HotColdDB pub fn store_sync_committee_branch( &self, block_root: Hash256, - sync_committee_branch: &FixedVector, + sync_committee_branch: &MerkleProof, ) -> Result<(), Error> { let column = DBColumn::SyncCommitteeBranch; self.hot_db.put_bytes( @@ -1004,14 +1064,13 @@ impl, Cold: ItemStore> HotColdDB start_slot: Slot, end_state: BeaconState, end_block_root: Hash256, - spec: &ChainSpec, ) -> Result> + '_, Error> { HybridForwardsBlockRootsIterator::new( self, + DBColumn::BeaconBlockRoots, start_slot, None, || Ok((end_state, end_block_root)), - spec, ) } @@ -1020,9 +1079,14 @@ impl, Cold: ItemStore> HotColdDB start_slot: Slot, end_slot: Slot, get_state: impl FnOnce() -> Result<(BeaconState, Hash256), Error>, - spec: &ChainSpec, ) -> Result, Error> { - 
HybridForwardsBlockRootsIterator::new(self, start_slot, Some(end_slot), get_state, spec) + HybridForwardsBlockRootsIterator::new( + self, + DBColumn::BeaconBlockRoots, + start_slot, + Some(end_slot), + get_state, + ) } pub fn forwards_state_roots_iterator( @@ -1030,14 +1094,13 @@ impl, Cold: ItemStore> HotColdDB start_slot: Slot, end_state_root: Hash256, end_state: BeaconState, - spec: &ChainSpec, ) -> Result> + '_, Error> { HybridForwardsStateRootsIterator::new( self, + DBColumn::BeaconStateRoots, start_slot, None, || Ok((end_state, end_state_root)), - spec, ) } @@ -1046,9 +1109,14 @@ impl, Cold: ItemStore> HotColdDB start_slot: Slot, end_slot: Slot, get_state: impl FnOnce() -> Result<(BeaconState, Hash256), Error>, - spec: &ChainSpec, ) -> Result, Error> { - HybridForwardsStateRootsIterator::new(self, start_slot, Some(end_slot), get_state, spec) + HybridForwardsStateRootsIterator::new( + self, + DBColumn::BeaconStateRoots, + start_slot, + Some(end_slot), + get_state, + ) } /// Load an epoch boundary state by using the hot state summary look-up. 
@@ -1074,7 +1142,7 @@ impl, Cold: ItemStore> HotColdDB Some(state_slot) => { let epoch_boundary_slot = state_slot / E::slots_per_epoch() * E::slots_per_epoch(); - self.load_cold_state_by_slot(epoch_boundary_slot) + self.load_cold_state_by_slot(epoch_boundary_slot).map(Some) } None => Ok(None), } @@ -1499,7 +1567,6 @@ impl, Cold: ItemStore> HotColdDB state.build_all_caches(&self.spec)?; let latest_block_root = state.get_latest_block_root(state_root); - let state_slot = state.slot(); if let PutStateOutcome::New = self.state_cache .lock() @@ -1509,13 +1576,14 @@ impl, Cold: ItemStore> HotColdDB self.log, "Cached ancestor state"; "state_root" => ?state_root, - "slot" => state_slot, + "slot" => slot, ); } Ok(()) }; let blocks = self.load_blocks_to_replay(boundary_state.slot(), slot, latest_block_root)?; + let _t = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_HOT_BLOCKS_TIME); self.replay_blocks( boundary_state, blocks, @@ -1532,48 +1600,142 @@ impl, Cold: ItemStore> HotColdDB } } + pub fn store_cold_state_summary( + &self, + state_root: &Hash256, + slot: Slot, + ops: &mut Vec, + ) -> Result<(), Error> { + ops.push(ColdStateSummary { slot }.as_kv_store_op(*state_root)); + ops.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col( + DBColumn::BeaconStateRoots.into(), + &slot.as_u64().to_be_bytes(), + ), + state_root.as_slice().to_vec(), + )); + Ok(()) + } + /// Store a pre-finalization state in the freezer database. - /// - /// If the state doesn't lie on a restore point boundary then just its summary will be stored. pub fn store_cold_state( &self, state_root: &Hash256, state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { - ops.push(ColdStateSummary { slot: state.slot() }.as_kv_store_op(*state_root)); + self.store_cold_state_summary(state_root, state.slot(), ops)?; - if state.slot() % self.config.slots_per_restore_point != 0 { - return Ok(()); + let slot = state.slot(); + match self.hierarchy.storage_strategy(slot)? 
{ + StorageStrategy::ReplayFrom(from) => { + debug!( + self.log, + "Storing cold state"; + "strategy" => "replay", + "from_slot" => from, + "slot" => state.slot(), + ); + // Already have persisted the state summary, don't persist anything else + } + StorageStrategy::Snapshot => { + debug!( + self.log, + "Storing cold state"; + "strategy" => "snapshot", + "slot" => state.slot(), + ); + self.store_cold_state_as_snapshot(state, ops)?; + } + StorageStrategy::DiffFrom(from) => { + debug!( + self.log, + "Storing cold state"; + "strategy" => "diff", + "from_slot" => from, + "slot" => state.slot(), + ); + self.store_cold_state_as_diff(state, from, ops)?; + } } - trace!( - self.log, - "Creating restore point"; - "slot" => state.slot(), - "state_root" => format!("{:?}", state_root) + Ok(()) + } + + pub fn store_cold_state_as_snapshot( + &self, + state: &BeaconState, + ops: &mut Vec, + ) -> Result<(), Error> { + let bytes = state.as_ssz_bytes(); + let compressed_value = { + let _timer = metrics::start_timer(&metrics::STORE_BEACON_STATE_FREEZER_COMPRESS_TIME); + let mut out = Vec::with_capacity(self.config.estimate_compressed_size(bytes.len())); + let mut encoder = Encoder::new(&mut out, self.config.compression_level) + .map_err(Error::Compression)?; + encoder.write_all(&bytes).map_err(Error::Compression)?; + encoder.finish().map_err(Error::Compression)?; + out + }; + + let key = get_key_for_col( + DBColumn::BeaconStateSnapshot.into(), + &state.slot().as_u64().to_be_bytes(), ); + ops.push(KeyValueStoreOp::PutKeyValue(key, compressed_value)); + Ok(()) + } - // 1. Convert to PartialBeaconState and store that in the DB. - let partial_state = PartialBeaconState::from_state_forgetful(state); - let op = partial_state.as_kv_store_op(*state_root); - ops.push(op); + fn load_cold_state_bytes_as_snapshot(&self, slot: Slot) -> Result>, Error> { + match self.cold_db.get_bytes( + DBColumn::BeaconStateSnapshot.into(), + &slot.as_u64().to_be_bytes(), + )? 
{ + Some(bytes) => { + let _timer = + metrics::start_timer(&metrics::STORE_BEACON_STATE_FREEZER_DECOMPRESS_TIME); + let mut ssz_bytes = + Vec::with_capacity(self.config.estimate_decompressed_size(bytes.len())); + let mut decoder = Decoder::new(&*bytes).map_err(Error::Compression)?; + decoder + .read_to_end(&mut ssz_bytes) + .map_err(Error::Compression)?; + Ok(Some(ssz_bytes)) + } + None => Ok(None), + } + } - // 2. Store updated vector entries. - // Block roots need to be written here as well as by the `ChunkWriter` in `migrate_db` - // because states may require older block roots, and the writer only stores block roots - // between the previous split point and the new split point. - let db = &self.cold_db; - store_updated_vector(BlockRoots, db, state, &self.spec, ops)?; - store_updated_vector(StateRoots, db, state, &self.spec, ops)?; - store_updated_vector(HistoricalRoots, db, state, &self.spec, ops)?; - store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?; - store_updated_vector(HistoricalSummaries, db, state, &self.spec, ops)?; + fn load_cold_state_as_snapshot(&self, slot: Slot) -> Result>, Error> { + Ok(self + .load_cold_state_bytes_as_snapshot(slot)? + .map(|bytes| BeaconState::from_ssz_bytes(&bytes, &self.spec)) + .transpose()?) + } - // 3. Store restore point. - let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point; - self.store_restore_point_hash(restore_point_index, *state_root, ops); + pub fn store_cold_state_as_diff( + &self, + state: &BeaconState, + from_slot: Slot, + ops: &mut Vec, + ) -> Result<(), Error> { + // Load diff base state bytes. + let (_, base_buffer) = { + let _t = metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_LOAD_FOR_STORE_TIME); + self.load_hdiff_buffer_for_slot(from_slot)? 
+ }; + let target_buffer = HDiffBuffer::from_state(state.clone()); + let diff = { + let _timer = metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_COMPUTE_TIME); + HDiff::compute(&base_buffer, &target_buffer, &self.config)? + }; + let diff_bytes = diff.as_ssz_bytes(); + let key = get_key_for_col( + DBColumn::BeaconStateDiff.into(), + &state.slot().as_u64().to_be_bytes(), + ); + ops.push(KeyValueStoreOp::PutKeyValue(key, diff_bytes)); Ok(()) } @@ -1582,7 +1744,7 @@ impl, Cold: ItemStore> HotColdDB /// Return `None` if no state with `state_root` lies in the freezer. pub fn load_cold_state(&self, state_root: &Hash256) -> Result>, Error> { match self.load_cold_state_slot(state_root)? { - Some(slot) => self.load_cold_state_by_slot(slot), + Some(slot) => self.load_cold_state_by_slot(slot).map(Some), None => Ok(None), } } @@ -1590,149 +1752,214 @@ impl, Cold: ItemStore> HotColdDB /// Load a pre-finalization state from the freezer database. /// /// Will reconstruct the state if it lies between restore points. - pub fn load_cold_state_by_slot(&self, slot: Slot) -> Result>, Error> { - // Guard against fetching states that do not exist due to gaps in the historic state - // database, which can occur due to checkpoint sync or re-indexing. - // See the comments in `get_historic_state_limits` for more information. - let (lower_limit, upper_limit) = self.get_historic_state_limits(); + pub fn load_cold_state_by_slot(&self, slot: Slot) -> Result, Error> { + let storage_strategy = self.hierarchy.storage_strategy(slot)?; - if slot <= lower_limit || slot >= upper_limit { - if slot % self.config.slots_per_restore_point == 0 { - let restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point; - self.load_restore_point_by_index(restore_point_idx) - } else { - self.load_cold_intermediate_state(slot) + // Search for a state from this slot or a recent prior slot in the historic state cache. 
+ let mut historic_state_cache = self.historic_state_cache.lock(); + + let cached_state = itertools::process_results( + storage_strategy + .replay_from_range(slot) + .rev() + .map(|prior_slot| historic_state_cache.get_state(prior_slot, &self.spec)), + |mut iter| iter.find_map(|cached_state| cached_state), + )?; + drop(historic_state_cache); + + if let Some(cached_state) = cached_state { + if cached_state.slot() == slot { + metrics::inc_counter(&metrics::STORE_BEACON_HISTORIC_STATE_CACHE_HIT); + return Ok(cached_state); } - .map(Some) - } else { - Ok(None) - } - } + metrics::inc_counter(&metrics::STORE_BEACON_HISTORIC_STATE_CACHE_MISS); - /// Load a restore point state by its `state_root`. - fn load_restore_point(&self, state_root: &Hash256) -> Result, Error> { - let partial_state_bytes = self - .cold_db - .get_bytes(DBColumn::BeaconState.into(), state_root.as_slice())? - .ok_or(HotColdDBError::MissingRestorePoint(*state_root))?; - let mut partial_state: PartialBeaconState = - PartialBeaconState::from_ssz_bytes(&partial_state_bytes, &self.spec)?; - - // Fill in the fields of the partial state. - partial_state.load_block_roots(&self.cold_db, &self.spec)?; - partial_state.load_state_roots(&self.cold_db, &self.spec)?; - partial_state.load_historical_roots(&self.cold_db, &self.spec)?; - partial_state.load_randao_mixes(&self.cold_db, &self.spec)?; - partial_state.load_historical_summaries(&self.cold_db, &self.spec)?; - - let mut state: BeaconState = partial_state.try_into()?; - state.apply_pending_mutations()?; - Ok(state) - } - - /// Load a restore point state by its `restore_point_index`. - fn load_restore_point_by_index( - &self, - restore_point_index: u64, - ) -> Result, Error> { - let state_root = self.load_restore_point_hash(restore_point_index)?; - self.load_restore_point(&state_root) - } - - /// Load a frozen state that lies between restore points. 
- fn load_cold_intermediate_state(&self, slot: Slot) -> Result, Error> { - if let Some(state) = self.historic_state_cache.lock().get(&slot) { - return Ok(state.clone()); + return self.load_cold_state_by_slot_using_replay(cached_state, slot); } - // 1. Load the restore points either side of the intermediate state. - let low_restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point; - let high_restore_point_idx = low_restore_point_idx + 1; + metrics::inc_counter(&metrics::STORE_BEACON_HISTORIC_STATE_CACHE_MISS); - // Use low restore point as the base state. - let mut low_slot: Slot = - Slot::new(low_restore_point_idx * self.config.slots_per_restore_point); - let mut low_state: Option> = None; + // Load using the diff hierarchy. For states that require replay we recurse into this + // function so that we can try to get their pre-state *as a state* rather than an hdiff + // buffer. + match self.hierarchy.storage_strategy(slot)? { + StorageStrategy::Snapshot | StorageStrategy::DiffFrom(_) => { + let buffer_timer = + metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_LOAD_TIME); + let (_, buffer) = self.load_hdiff_buffer_for_slot(slot)?; + drop(buffer_timer); + let state = buffer.as_state(&self.spec)?; - // Try to get a more recent state from the cache to avoid massive blocks replay. - for (s, state) in self.historic_state_cache.lock().iter() { - if s.as_u64() / self.config.slots_per_restore_point == low_restore_point_idx - && *s < slot - && low_slot < *s - { - low_slot = *s; - low_state = Some(state.clone()); + self.historic_state_cache + .lock() + .put_both(slot, state.clone(), buffer); + Ok(state) + } + StorageStrategy::ReplayFrom(from) => { + // No prior state found in cache (above), need to load by diffing and then + // replaying. + let base_state = self.load_cold_state_by_slot(from)?; + self.load_cold_state_by_slot_using_replay(base_state, slot) } } - - // If low_state is still None, use load_restore_point_by_index to load the state. 
- let low_state = match low_state { - Some(state) => state, - None => self.load_restore_point_by_index(low_restore_point_idx)?, - }; - - // Acquire the read lock, so that the split can't change while this is happening. - let split = self.split.read_recursive(); - - let high_restore_point = self.get_restore_point(high_restore_point_idx, &split)?; - - // 2. Load the blocks from the high restore point back to the low point. - let blocks = self.load_blocks_to_replay( - low_slot, - slot, - self.get_high_restore_point_block_root(&high_restore_point, slot)?, - )?; - - // 3. Replay the blocks on top of the low point. - // Use a forwards state root iterator to avoid doing any tree hashing. - // The state root of the high restore point should never be used, so is safely set to 0. - let state_root_iter = self.forwards_state_roots_iterator_until( - low_slot, - slot, - || Ok((high_restore_point, Hash256::zero())), - &self.spec, - )?; - - let mut state = self.replay_blocks(low_state, blocks, slot, Some(state_root_iter), None)?; - state.apply_pending_mutations()?; - - // If state is not error, put it in the cache. - self.historic_state_cache.lock().put(slot, state.clone()); - - Ok(state) } - /// Get the restore point with the given index, or if it is out of bounds, the split state. - pub(crate) fn get_restore_point( + fn load_cold_state_by_slot_using_replay( &self, - restore_point_idx: u64, - split: &Split, - ) -> Result, Error> { - if restore_point_idx * self.config.slots_per_restore_point >= split.slot.as_u64() { - self.get_state(&split.state_root, Some(split.slot))? - .ok_or(HotColdDBError::MissingSplitState( - split.state_root, - split.slot, - )) - .map_err(Into::into) - } else { - self.load_restore_point_by_index(restore_point_idx) - } - } - - /// Get a suitable block root for backtracking from `high_restore_point` to the state at `slot`. - /// - /// Defaults to the block root for `slot`, which *should* be in range. 
- fn get_high_restore_point_block_root( - &self, - high_restore_point: &BeaconState, + mut base_state: BeaconState, slot: Slot, - ) -> Result { - high_restore_point - .get_block_root(slot) - .or_else(|_| high_restore_point.get_oldest_block_root()) - .copied() - .map_err(HotColdDBError::RestorePointBlockHashError) + ) -> Result, Error> { + if !base_state.all_caches_built() { + // Build all caches and update the historic state cache so that these caches may be used + // at future slots. We do this lazily here rather than when populating the cache in + // order to speed up queries at snapshot/diff slots, which are already slow. + let cache_timer = + metrics::start_timer(&metrics::STORE_BEACON_COLD_BUILD_BEACON_CACHES_TIME); + base_state.build_all_caches(&self.spec)?; + debug!( + self.log, + "Built caches for historic state"; + "target_slot" => slot, + "build_time_ms" => metrics::stop_timer_with_duration(cache_timer).as_millis() + ); + self.historic_state_cache + .lock() + .put_state(base_state.slot(), base_state.clone()); + } + + if base_state.slot() == slot { + return Ok(base_state); + } + + let blocks = self.load_cold_blocks(base_state.slot() + 1, slot)?; + + // Include state root for base state as it is required by block processing to not + // have to hash the state. 
+ let replay_timer = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_COLD_BLOCKS_TIME); + let state_root_iter = + self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { + Err(Error::StateShouldNotBeRequired(slot)) + })?; + let state = self.replay_blocks(base_state, blocks, slot, Some(state_root_iter), None)?; + debug!( + self.log, + "Replayed blocks for historic state"; + "target_slot" => slot, + "replay_time_ms" => metrics::stop_timer_with_duration(replay_timer).as_millis() + ); + + self.historic_state_cache + .lock() + .put_state(slot, state.clone()); + Ok(state) + } + + fn load_hdiff_for_slot(&self, slot: Slot) -> Result { + let bytes = { + let _t = metrics::start_timer(&metrics::BEACON_HDIFF_READ_TIMES); + self.cold_db + .get_bytes( + DBColumn::BeaconStateDiff.into(), + &slot.as_u64().to_be_bytes(), + )? + .ok_or(HotColdDBError::MissingHDiff(slot))? + }; + let hdiff = { + let _t = metrics::start_timer(&metrics::BEACON_HDIFF_DECODE_TIMES); + HDiff::from_ssz_bytes(&bytes)? + }; + Ok(hdiff) + } + + /// Returns `HDiffBuffer` for the specified slot, or `HDiffBuffer` for the `ReplayFrom` slot if + /// the diff for the specified slot is not stored. + fn load_hdiff_buffer_for_slot(&self, slot: Slot) -> Result<(Slot, HDiffBuffer), Error> { + if let Some(buffer) = self.historic_state_cache.lock().get_hdiff_buffer(slot) { + debug!( + self.log, + "Hit hdiff buffer cache"; + "slot" => slot + ); + metrics::inc_counter(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_HIT); + return Ok((slot, buffer)); + } + metrics::inc_counter(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_MISS); + + // Load buffer for the previous state. + // This amount of recursion (<10 levels) should be OK. + let t = std::time::Instant::now(); + match self.hierarchy.storage_strategy(slot)? { + // Base case. + StorageStrategy::Snapshot => { + let state = self + .load_cold_state_as_snapshot(slot)? 
+ .ok_or(Error::MissingSnapshot(slot))?; + let buffer = HDiffBuffer::from_state(state.clone()); + + self.historic_state_cache + .lock() + .put_both(slot, state, buffer.clone()); + + let load_time_ms = t.elapsed().as_millis(); + debug!( + self.log, + "Cached state and hdiff buffer"; + "load_time_ms" => load_time_ms, + "slot" => slot + ); + + Ok((slot, buffer)) + } + // Recursive case. + StorageStrategy::DiffFrom(from) => { + let (_buffer_slot, mut buffer) = self.load_hdiff_buffer_for_slot(from)?; + + // Load diff and apply it to buffer. + let diff = self.load_hdiff_for_slot(slot)?; + { + let _timer = + metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_APPLY_TIME); + diff.apply(&mut buffer, &self.config)?; + } + + self.historic_state_cache + .lock() + .put_hdiff_buffer(slot, buffer.clone()); + + let load_time_ms = t.elapsed().as_millis(); + debug!( + self.log, + "Cached hdiff buffer"; + "load_time_ms" => load_time_ms, + "slot" => slot + ); + + Ok((slot, buffer)) + } + StorageStrategy::ReplayFrom(from) => self.load_hdiff_buffer_for_slot(from), + } + } + + /// Load cold blocks between `start_slot` and `end_slot` inclusive. + pub fn load_cold_blocks( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result>, Error> { + let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_COLD_BLOCKS_TIME); + let block_root_iter = + self.forwards_block_roots_iterator_until(start_slot, end_slot, || { + Err(Error::StateShouldNotBeRequired(end_slot)) + })?; + process_results(block_root_iter, |iter| { + iter.map(|(block_root, _slot)| block_root) + .dedup() + .map(|block_root| { + self.get_blinded_block(&block_root)? + .ok_or(Error::MissingBlock(block_root)) + }) + .collect() + })? } /// Load the blocks between `start_slot` and `end_slot` by backtracking from `end_block_hash`. 
@@ -1745,6 +1972,7 @@ impl, Cold: ItemStore> HotColdDB end_slot: Slot, end_block_hash: Hash256, ) -> Result>>, Error> { + let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_HOT_BLOCKS_TIME); let mut blocks = ParentRootBlockIterator::new(self, end_block_hash) .map(|result| result.map(|(_, block)| block)) // Include the block at the end slot (if any), it needs to be @@ -1787,6 +2015,8 @@ impl, Cold: ItemStore> HotColdDB state_root_iter: Option>>, pre_slot_hook: Option>, ) -> Result, Error> { + metrics::inc_counter_by(&metrics::STORE_BEACON_REPLAYED_BLOCKS, blocks.len() as u64); + let mut block_replayer = BlockReplayer::new(state, &self.spec) .no_signature_verification() .minimal_block_root_verification(); @@ -1917,30 +2147,6 @@ impl, Cold: ItemStore> HotColdDB }; } - /// Fetch the slot of the most recently stored restore point (if any). - pub fn get_latest_restore_point_slot(&self) -> Option { - let split_slot = self.get_split_slot(); - let anchor = self.get_anchor_info(); - - // There are no restore points stored if the state upper limit lies in the hot database, - // and the lower limit is zero. It hasn't been reached yet, and may never be. - if anchor.as_ref().map_or(false, |a| { - a.state_upper_limit >= split_slot && a.state_lower_limit == 0 - }) { - None - } else if let Some(lower_limit) = anchor - .map(|a| a.state_lower_limit) - .filter(|limit| *limit > 0) - { - Some(lower_limit) - } else { - Some( - (split_slot - 1) / self.config.slots_per_restore_point - * self.config.slots_per_restore_point, - ) - } - } - /// Load the database schema version from disk. fn load_schema_version(&self) -> Result, Error> { self.hot_db.get(&SCHEMA_VERSION_KEY) @@ -1973,36 +2179,33 @@ impl, Cold: ItemStore> HotColdDB retain_historic_states: bool, ) -> Result { let anchor_slot = block.slot(); - let slots_per_restore_point = self.config.slots_per_restore_point; + // Set the `state_upper_limit` to the slot of the *next* checkpoint. 
+ let next_snapshot_slot = self.hierarchy.next_snapshot_slot(anchor_slot)?; let state_upper_limit = if !retain_historic_states { STATE_UPPER_LIMIT_NO_RETAIN - } else if anchor_slot % slots_per_restore_point == 0 { - anchor_slot } else { - // Set the `state_upper_limit` to the slot of the *next* restore point. - // See `get_state_upper_limit` for rationale. - (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point + next_snapshot_slot }; let anchor_info = if state_upper_limit == 0 && anchor_slot == 0 { // Genesis archive node: no anchor because we *will* store all states. - None + ANCHOR_FOR_ARCHIVE_NODE } else { - Some(AnchorInfo { + AnchorInfo { anchor_slot, oldest_block_slot: anchor_slot, oldest_block_parent: block.parent_root(), state_upper_limit, state_lower_limit: self.spec.genesis_slot, - }) + } }; - self.compare_and_set_anchor_info(None, anchor_info) + self.compare_and_set_anchor_info(ANCHOR_UNINITIALIZED, anchor_info) } /// Get a clone of the store's anchor info. /// /// To do mutations, use `compare_and_set_anchor_info`. - pub fn get_anchor_info(&self) -> Option { + pub fn get_anchor_info(&self) -> AnchorInfo { self.anchor_info.read_recursive().clone() } @@ -2015,8 +2218,8 @@ impl, Cold: ItemStore> HotColdDB /// is not correct. pub fn compare_and_set_anchor_info( &self, - prev_value: Option, - new_value: Option, + prev_value: AnchorInfo, + new_value: AnchorInfo, ) -> Result { let mut anchor_info = self.anchor_info.write(); if *anchor_info == prev_value { @@ -2031,39 +2234,26 @@ impl, Cold: ItemStore> HotColdDB /// As for `compare_and_set_anchor_info`, but also writes the anchor to disk immediately. 
pub fn compare_and_set_anchor_info_with_write( &self, - prev_value: Option, - new_value: Option, + prev_value: AnchorInfo, + new_value: AnchorInfo, ) -> Result<(), Error> { let kv_store_op = self.compare_and_set_anchor_info(prev_value, new_value)?; self.hot_db.do_atomically(vec![kv_store_op]) } - /// Load the anchor info from disk, but do not set `self.anchor_info`. - fn load_anchor_info(&self) -> Result, Error> { - self.hot_db.get(&ANCHOR_INFO_KEY) + /// Load the anchor info from disk. + fn load_anchor_info(hot_db: &Hot) -> Result { + Ok(hot_db + .get(&ANCHOR_INFO_KEY)? + .unwrap_or(ANCHOR_UNINITIALIZED)) } /// Store the given `anchor_info` to disk. /// /// The argument is intended to be `self.anchor_info`, but is passed manually to avoid issues /// with recursive locking. - fn store_anchor_info_in_batch(&self, anchor_info: &Option) -> KeyValueStoreOp { - if let Some(ref anchor_info) = anchor_info { - anchor_info.as_kv_store_op(ANCHOR_INFO_KEY) - } else { - KeyValueStoreOp::DeleteKey(get_key_for_col( - DBColumn::BeaconMeta.into(), - ANCHOR_INFO_KEY.as_slice(), - )) - } - } - - /// If an anchor exists, return its `anchor_slot` field. - pub fn get_anchor_slot(&self) -> Option { - self.anchor_info - .read_recursive() - .as_ref() - .map(|a| a.anchor_slot) + fn store_anchor_info_in_batch(&self, anchor_info: &AnchorInfo) -> KeyValueStoreOp { + anchor_info.as_kv_store_op(ANCHOR_INFO_KEY) } /// Initialize the `BlobInfo` when starting from genesis or a checkpoint. @@ -2211,7 +2401,7 @@ impl, Cold: ItemStore> HotColdDB /// instance. pub fn get_historic_state_limits(&self) -> (Slot, Slot) { // If checkpoint sync is used then states in the hot DB will always be available, but may - // become unavailable as finalisation advances due to the lack of a restore point in the + // become unavailable as finalisation advances due to the lack of a snapshot in the // database. 
For this reason we take the minimum of the split slot and the // restore-point-aligned `state_upper_limit`, which should be set _ahead_ of the checkpoint // slot during initialisation. @@ -2222,20 +2412,16 @@ impl, Cold: ItemStore> HotColdDB // a new restore point will be created at that slot, making all states from 4096 onwards // permanently available. let split_slot = self.get_split_slot(); - self.anchor_info - .read_recursive() - .as_ref() - .map_or((split_slot, self.spec.genesis_slot), |a| { - (a.state_lower_limit, min(a.state_upper_limit, split_slot)) - }) + let anchor = self.anchor_info.read_recursive(); + ( + anchor.state_lower_limit, + min(anchor.state_upper_limit, split_slot), + ) } /// Return the minimum slot such that blocks are available for all subsequent slots. pub fn get_oldest_block_slot(&self) -> Slot { - self.anchor_info - .read_recursive() - .as_ref() - .map_or(self.spec.genesis_slot, |anchor| anchor.oldest_block_slot) + self.anchor_info.read_recursive().oldest_block_slot } /// Return the in-memory configuration used by the database. @@ -2278,32 +2464,6 @@ impl, Cold: ItemStore> HotColdDB self.split.read_recursive().as_kv_store_op(SPLIT_KEY) } - /// Load the state root of a restore point. - fn load_restore_point_hash(&self, restore_point_index: u64) -> Result { - let key = Self::restore_point_key(restore_point_index); - self.cold_db - .get(&key)? - .map(|r: RestorePointHash| r.state_root) - .ok_or_else(|| HotColdDBError::MissingRestorePointHash(restore_point_index).into()) - } - - /// Store the state root of a restore point. - fn store_restore_point_hash( - &self, - restore_point_index: u64, - state_root: Hash256, - ops: &mut Vec, - ) { - let value = &RestorePointHash { state_root }; - let op = value.as_kv_store_op(Self::restore_point_key(restore_point_index)); - ops.push(op); - } - - /// Convert a `restore_point_index` into a database key. 
- fn restore_point_key(restore_point_index: u64) -> Hash256 { - Hash256::from_low_u64_be(restore_point_index) - } - /// Load a frozen state's slot, given its root. pub fn load_cold_state_slot(&self, state_root: &Hash256) -> Result, Error> { Ok(self @@ -2331,58 +2491,51 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.get(state_root) } - /// Verify that a parsed config is valid. - fn verify_config(config: &StoreConfig) -> Result<(), HotColdDBError> { - Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; - Self::verify_epochs_per_blob_prune(config.epochs_per_blob_prune) - } - - /// Check that the restore point frequency is valid. - /// - /// Specifically, check that it is: - /// (1) A divisor of the number of slots per historical root, and - /// (2) Divisible by the number of slots per epoch - /// - /// - /// (1) ensures that we have at least one restore point within range of our state - /// root history when iterating backwards (and allows for more frequent restore points if - /// desired). - /// - /// (2) ensures that restore points align with hot state summaries, making it - /// quick to migrate hot to cold. - fn verify_slots_per_restore_point(slots_per_restore_point: u64) -> Result<(), HotColdDBError> { - let slots_per_historical_root = E::SlotsPerHistoricalRoot::to_u64(); - let slots_per_epoch = E::slots_per_epoch(); - if slots_per_restore_point > 0 - && slots_per_historical_root % slots_per_restore_point == 0 - && slots_per_restore_point % slots_per_epoch == 0 - { - Ok(()) - } else { - Err(HotColdDBError::InvalidSlotsPerRestorePoint { - slots_per_restore_point, - slots_per_historical_root, - slots_per_epoch, - }) - } - } - - // Check that epochs_per_blob_prune is at least 1 epoch to avoid attempting to prune the same - // epochs over and over again. 
- fn verify_epochs_per_blob_prune(epochs_per_blob_prune: u64) -> Result<(), HotColdDBError> { - if epochs_per_blob_prune > 0 { - Ok(()) - } else { - Err(HotColdDBError::ZeroEpochsPerBlobPrune) - } - } - /// Run a compaction pass to free up space used by deleted states. pub fn compact(&self) -> Result<(), Error> { self.hot_db.compact()?; Ok(()) } + /// Run a compaction pass on the freezer DB to free up space used by deleted states. + pub fn compact_freezer(&self) -> Result<(), Error> { + let current_schema_columns = vec![ + DBColumn::BeaconColdStateSummary, + DBColumn::BeaconStateSnapshot, + DBColumn::BeaconStateDiff, + DBColumn::BeaconStateRoots, + ]; + + // We can remove this once schema V21 has been gone for a while. + let previous_schema_columns = vec![ + DBColumn::BeaconState, + DBColumn::BeaconStateSummary, + DBColumn::BeaconBlockRootsChunked, + DBColumn::BeaconStateRootsChunked, + DBColumn::BeaconRestorePoint, + DBColumn::BeaconHistoricalRoots, + DBColumn::BeaconRandaoMixes, + DBColumn::BeaconHistoricalSummaries, + ]; + let mut columns = current_schema_columns; + columns.extend(previous_schema_columns); + + for column in columns { + info!( + self.log, + "Starting compaction"; + "column" => ?column + ); + self.cold_db.compact_column(column)?; + info!( + self.log, + "Finishing compaction"; + "column" => ?column + ); + } + Ok(()) + } + /// Return `true` if compaction on finalization/pruning is enabled. 
pub fn compact_on_prune(&self) -> bool { self.config.compact_on_prune @@ -2433,12 +2586,12 @@ impl, Cold: ItemStore> HotColdDB block_root: Hash256, ) -> Result, Error> { let mut ops = vec![]; - let mut block_root_writer = - ChunkWriter::::new(&self.cold_db, start_slot.as_usize())?; - for slot in start_slot.as_usize()..end_slot.as_usize() { - block_root_writer.set(slot, block_root, &mut ops)?; + for slot in start_slot.as_u64()..end_slot.as_u64() { + ops.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + block_root.as_slice().to_vec(), + )); } - block_root_writer.write(&mut ops)?; Ok(ops) } @@ -2489,7 +2642,7 @@ impl, Cold: ItemStore> HotColdDB "Pruning finalized payloads"; "info" => "you may notice degraded I/O performance while this runs" ); - let anchor_slot = self.get_anchor_info().map(|info| info.anchor_slot); + let anchor_slot = self.get_anchor_info().anchor_slot; let mut ops = vec![]; let mut last_pruned_block_root = None; @@ -2530,7 +2683,7 @@ impl, Cold: ItemStore> HotColdDB ops.push(StoreOp::DeleteExecutionPayload(block_root)); } - if Some(slot) == anchor_slot { + if slot == anchor_slot { info!( self.log, "Payload pruning reached anchor state"; @@ -2637,16 +2790,15 @@ impl, Cold: ItemStore> HotColdDB } // Sanity checks. 
- if let Some(anchor) = self.get_anchor_info() { - if oldest_blob_slot < anchor.oldest_block_slot { - error!( - self.log, - "Oldest blob is older than oldest block"; - "oldest_blob_slot" => oldest_blob_slot, - "oldest_block_slot" => anchor.oldest_block_slot - ); - return Err(HotColdDBError::BlobPruneLogicError.into()); - } + let anchor = self.get_anchor_info(); + if oldest_blob_slot < anchor.oldest_block_slot { + error!( + self.log, + "Oldest blob is older than oldest block"; + "oldest_blob_slot" => oldest_blob_slot, + "oldest_block_slot" => anchor.oldest_block_slot + ); + return Err(HotColdDBError::BlobPruneLogicError.into()); } // Iterate block roots forwards from the oldest blob slot. @@ -2661,21 +2813,16 @@ impl, Cold: ItemStore> HotColdDB let mut ops = vec![]; let mut last_pruned_block_root = None; - for res in self.forwards_block_roots_iterator_until( - oldest_blob_slot, - end_slot, - || { - let (_, split_state) = self - .get_advanced_hot_state(split.block_root, split.slot, split.state_root)? - .ok_or(HotColdDBError::MissingSplitState( - split.state_root, - split.slot, - ))?; + for res in self.forwards_block_roots_iterator_until(oldest_blob_slot, end_slot, || { + let (_, split_state) = self + .get_advanced_hot_state(split.block_root, split.slot, split.state_root)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + ))?; - Ok((split_state, split.block_root)) - }, - &self.spec, - )? { + Ok((split_state, split.block_root)) + })? { let (block_root, slot) = match res { Ok(tuple) => tuple, Err(e) => { @@ -2739,84 +2886,6 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } - /// This function fills in missing block roots between last restore point slot and split - /// slot, if any. 
- pub fn heal_freezer_block_roots_at_split(&self) -> Result<(), Error> { - let split = self.get_split_info(); - let last_restore_point_slot = (split.slot - 1) / self.config.slots_per_restore_point - * self.config.slots_per_restore_point; - - // Load split state (which has access to block roots). - let (_, split_state) = self - .get_advanced_hot_state(split.block_root, split.slot, split.state_root)? - .ok_or(HotColdDBError::MissingSplitState( - split.state_root, - split.slot, - ))?; - - let mut batch = vec![]; - let mut chunk_writer = ChunkWriter::::new( - &self.cold_db, - last_restore_point_slot.as_usize(), - )?; - - for slot in (last_restore_point_slot.as_u64()..split.slot.as_u64()).map(Slot::new) { - let block_root = *split_state.get_block_root(slot)?; - chunk_writer.set(slot.as_usize(), block_root, &mut batch)?; - } - chunk_writer.write(&mut batch)?; - self.cold_db.do_atomically(batch)?; - - Ok(()) - } - - pub fn heal_freezer_block_roots_at_genesis(&self) -> Result<(), Error> { - let oldest_block_slot = self.get_oldest_block_slot(); - let split_slot = self.get_split_slot(); - - // Check if backfill has been completed AND the freezer db has data in it - if oldest_block_slot != 0 || split_slot == 0 { - return Ok(()); - } - - let mut block_root_iter = self.forwards_block_roots_iterator_until( - Slot::new(0), - split_slot - 1, - || { - Err(Error::DBError { - message: "Should not require end state".to_string(), - }) - }, - &self.spec, - )?; - - let (genesis_block_root, _) = block_root_iter.next().ok_or_else(|| Error::DBError { - message: "Genesis block root missing".to_string(), - })??; - - let slots_to_fix = itertools::process_results(block_root_iter, |iter| { - iter.take_while(|(block_root, _)| block_root.is_zero()) - .map(|(_, slot)| slot) - .collect::>() - })?; - - let Some(first_slot) = slots_to_fix.first() else { - return Ok(()); - }; - - let mut chunk_writer = - ChunkWriter::::new(&self.cold_db, first_slot.as_usize())?; - let mut ops = vec![]; - for slot in 
slots_to_fix { - chunk_writer.set(slot.as_usize(), genesis_block_root, &mut ops)?; - } - - chunk_writer.write(&mut ops)?; - self.cold_db.do_atomically(ops)?; - - Ok(()) - } - /// Delete *all* states from the freezer database and update the anchor accordingly. /// /// WARNING: this method deletes the genesis state and replaces it with the provided @@ -2828,46 +2897,49 @@ impl, Cold: ItemStore> HotColdDB genesis_state_root: Hash256, genesis_state: &BeaconState, ) -> Result<(), Error> { - // Make sure there is no missing block roots before pruning - self.heal_freezer_block_roots_at_split()?; - // Update the anchor to use the dummy state upper limit and disable historic state storage. let old_anchor = self.get_anchor_info(); - let new_anchor = if let Some(old_anchor) = old_anchor.clone() { - AnchorInfo { - state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, - state_lower_limit: Slot::new(0), - ..old_anchor.clone() - } - } else { - AnchorInfo { - anchor_slot: Slot::new(0), - oldest_block_slot: Slot::new(0), - oldest_block_parent: Hash256::zero(), - state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, - state_lower_limit: Slot::new(0), - } + let new_anchor = AnchorInfo { + state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, + state_lower_limit: Slot::new(0), + ..old_anchor.clone() }; // Commit the anchor change immediately: if the cold database ops fail they can always be // retried, and we can't do them atomically with this change anyway. - self.compare_and_set_anchor_info_with_write(old_anchor, Some(new_anchor))?; + self.compare_and_set_anchor_info_with_write(old_anchor, new_anchor)?; // Stage freezer data for deletion. Do not bother loading and deserializing values as this // wastes time and is less schema-agnostic. My hope is that this method will be useful for // migrating to the tree-states schema (delete everything in the freezer then start afresh). 
let mut cold_ops = vec![]; - let columns = [ + let current_schema_columns = vec![ + DBColumn::BeaconColdStateSummary, + DBColumn::BeaconStateSnapshot, + DBColumn::BeaconStateDiff, + DBColumn::BeaconStateRoots, + ]; + + // This function is intended to be able to clean up leftover V21 freezer database stuff in + // the case where the V22 schema upgrade failed *after* commiting the version increment but + // *before* cleaning up the freezer DB. + // + // We can remove this once schema V21 has been gone for a while. + let previous_schema_columns = vec![ DBColumn::BeaconState, DBColumn::BeaconStateSummary, + DBColumn::BeaconBlockRootsChunked, + DBColumn::BeaconStateRootsChunked, DBColumn::BeaconRestorePoint, - DBColumn::BeaconStateRoots, DBColumn::BeaconHistoricalRoots, DBColumn::BeaconRandaoMixes, DBColumn::BeaconHistoricalSummaries, ]; + let mut columns = current_schema_columns; + columns.extend(previous_schema_columns); + for column in columns { for res in self.cold_db.iter_column_keys::>(column) { let key = res?; @@ -2877,20 +2949,9 @@ impl, Cold: ItemStore> HotColdDB ))); } } + let delete_ops = cold_ops.len(); - // XXX: We need to commit the mass deletion here *before* re-storing the genesis state, as - // the current schema performs reads as part of `store_cold_state`. This can be deleted - // once the target schema is tree-states. If the process is killed before the genesis state - // is written this can be fixed by re-running. - info!( - self.log, - "Deleting historic states"; - "num_kv" => cold_ops.len(), - ); - self.cold_db.do_atomically(std::mem::take(&mut cold_ops))?; - - // If we just deleted the the genesis state, re-store it using the *current* schema, which - // may be different from the schema of the genesis state we just deleted. + // If we just deleted the genesis state, re-store it using the current* schema. 
if self.get_split_slot() > 0 { info!( self.log, @@ -2898,11 +2959,17 @@ impl, Cold: ItemStore> HotColdDB "state_root" => ?genesis_state_root, ); self.store_cold_state(&genesis_state_root, genesis_state, &mut cold_ops)?; - self.cold_db.do_atomically(cold_ops)?; } + info!( + self.log, + "Deleting historic states"; + "delete_ops" => delete_ops, + ); + self.cold_db.do_atomically(cold_ops)?; + // In order to reclaim space, we need to compact the freezer DB as well. - self.cold_db.compact()?; + self.compact_freezer()?; Ok(()) } @@ -2977,7 +3044,6 @@ pub fn migrate_database, Cold: ItemStore>( // boundary (in order for the hot state summary scheme to work). let current_split_slot = store.split.read_recursive().slot; let anchor_info = store.anchor_info.read_recursive().clone(); - let anchor_slot = anchor_info.as_ref().map(|a| a.anchor_slot); if finalized_state.slot() < current_split_slot { return Err(HotColdDBError::FreezeSlotError { @@ -2994,28 +3060,20 @@ pub fn migrate_database, Cold: ItemStore>( } let mut hot_db_ops = vec![]; - let mut cold_db_ops = vec![]; + let mut cold_db_block_ops = vec![]; let mut epoch_boundary_blocks = HashSet::new(); let mut non_checkpoint_block_roots = HashSet::new(); - // Chunk writer for the linear block roots in the freezer DB. - // Start at the new upper limit because we iterate backwards. - let new_frozen_block_root_upper_limit = finalized_state.slot().as_usize().saturating_sub(1); - let mut block_root_writer = - ChunkWriter::::new(&store.cold_db, new_frozen_block_root_upper_limit)?; - - // 1. Copy all of the states between the new finalized state and the split slot, from the hot DB - // to the cold DB. Delete the execution payloads of these now-finalized blocks. 
- let state_root_iter = RootsIterator::new(&store, finalized_state); - for maybe_tuple in state_root_iter.take_while(|result| match result { - Ok((_, _, slot)) => { - slot >= ¤t_split_slot - && anchor_slot.map_or(true, |anchor_slot| slot >= &anchor_slot) - } - Err(_) => true, - }) { - let (block_root, state_root, slot) = maybe_tuple?; + // Iterate in descending order until the current split slot + let state_roots = RootsIterator::new(&store, finalized_state) + .take_while(|result| match result { + Ok((_, _, slot)) => *slot >= current_split_slot, + Err(_) => true, + }) + .collect::, _>>()?; + // Then, iterate states in slot ascending order, as they are stored wrt previous states. + for (block_root, state_root, slot) in state_roots.into_iter().rev() { // Delete the execution payload if payload pruning is enabled. At a skipped slot we may // delete the payload for the finalized block itself, but that's OK as we only guarantee // that payloads are present for slots >= the split slot. The payload fetching code is also @@ -3024,6 +3082,15 @@ pub fn migrate_database, Cold: ItemStore>( hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); } + // Store the slot to block root mapping. + cold_db_block_ops.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col( + DBColumn::BeaconBlockRoots.into(), + &slot.as_u64().to_be_bytes(), + ), + block_root.as_slice().to_vec(), + )); + // At a missed slot, `state_root_iter` will return the block root // from the previous non-missed slot. This ensures that the block root at an // epoch boundary is always a checkpoint block root. We keep track of block roots @@ -3043,40 +3110,36 @@ pub fn migrate_database, Cold: ItemStore>( // Delete the old summary, and the full state if we lie on an epoch boundary. hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); - // Store the block root for this slot in the linear array of frozen block roots. 
- block_root_writer.set(slot.as_usize(), block_root, &mut cold_db_ops)?; - // Do not try to store states if a restore point is yet to be stored, or will never be // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state // which always needs to be copied from the hot DB to the freezer and should not be deleted. - if slot != 0 - && anchor_info - .as_ref() - .map_or(false, |anchor| slot < anchor.state_upper_limit) - { + if slot != 0 && slot < anchor_info.state_upper_limit { debug!(store.log, "Pruning finalized state"; "slot" => slot); - continue; } - // Store a pointer from this state root to its slot, so we can later reconstruct states - // from their state root alone. - let cold_state_summary = ColdStateSummary { slot }; - let op = cold_state_summary.as_kv_store_op(state_root); - cold_db_ops.push(op); + let mut cold_db_ops = vec![]; - if slot % store.config.slots_per_restore_point == 0 { - let state: BeaconState = get_full_state(&store.hot_db, &state_root, &store.spec)? + // Only store the cold state if it's on a diff boundary. + // Calling `store_cold_state_summary` instead of `store_cold_state` for those allows us + // to skip loading many hot states. + if matches!( + store.hierarchy.storage_strategy(slot)?, + StorageStrategy::ReplayFrom(..) + ) { + // Store slot -> state_root and state_root -> slot mappings. + store.store_cold_state_summary(&state_root, slot, &mut cold_db_ops)?; + } else { + let state: BeaconState = store + .get_hot_state(&state_root)? .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; - - // Commit the batch of cold DB ops whenever a full state is written. Each state stored - // may read the linear fields of previous states stored. - store - .cold_db - .do_atomically(std::mem::take(&mut cold_db_ops))?; } + + // Cold states are diffed with respect to each other, so we need to finish writing previous + // states before storing new ones. 
+ store.cold_db.do_atomically(cold_db_ops)?; } // Prune sync committee branch data for all non checkpoint block roots. @@ -3092,10 +3155,6 @@ pub fn migrate_database, Cold: ItemStore>( hot_db_ops.push(StoreOp::DeleteSyncCommitteeBranch(block_root)); }); - // Finish writing the block roots and commit the remaining cold DB ops. - block_root_writer.write(&mut cold_db_ops)?; - store.cold_db.do_atomically(cold_db_ops)?; - // Warning: Critical section. We have to take care not to put any of the two databases in an // inconsistent state if the OS process dies at any point during the freezing // procedure. @@ -3105,8 +3164,7 @@ pub fn migrate_database, Cold: ItemStore>( // at any point below but it may happen that some states won't be deleted from the hot database // and will remain there forever. Since dying in these particular few lines should be an // exceedingly rare event, this should be an acceptable tradeoff. - - // Flush to disk all the states that have just been migrated to the cold store. + store.cold_db.do_atomically(cold_db_block_ops)?; store.cold_db.sync()?; { let mut split_guard = store.split.write(); @@ -3252,27 +3310,7 @@ pub(crate) struct ColdStateSummary { impl StoreItem for ColdStateSummary { fn db_column() -> DBColumn { - DBColumn::BeaconStateSummary - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) - } -} - -/// Struct for storing the state root of a restore point in the database. 
-#[derive(Debug, Clone, Copy, Default, Encode, Decode)] -struct RestorePointHash { - state_root: Hash256, -} - -impl StoreItem for RestorePointHash { - fn db_column() -> DBColumn { - DBColumn::BeaconRestorePoint + DBColumn::BeaconColdStateSummary } fn as_store_bytes(&self) -> Vec { diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 71dc96d99e..97a88c01c8 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -53,8 +53,8 @@ pub struct StateRootsIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore inner: RootsIterator<'a, E, Hot, Cold>, } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone - for StateRootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Clone + for StateRootsIterator<'_, E, Hot, Cold> { fn clone(&self) -> Self { Self { @@ -77,8 +77,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> StateRootsIterator<' } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for StateRootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for StateRootsIterator<'_, E, Hot, Cold> { type Item = Result<(Hash256, Slot), Error>; @@ -101,8 +101,8 @@ pub struct BlockRootsIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore inner: RootsIterator<'a, E, Hot, Cold>, } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone - for BlockRootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Clone + for BlockRootsIterator<'_, E, Hot, Cold> { fn clone(&self) -> Self { Self { @@ -136,8 +136,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockRootsIterator<' } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for BlockRootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for BlockRootsIterator<'_, E, Hot, Cold> { type Item = Result<(Hash256, Slot), Error>; @@ -155,9 +155,7 @@ pub struct RootsIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> slot: Slot, } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone - for 
RootsIterator<'a, E, Hot, Cold> -{ +impl, Cold: ItemStore> Clone for RootsIterator<'_, E, Hot, Cold> { fn clone(&self) -> Self { Self { store: self.store, @@ -232,8 +230,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, E, } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for RootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for RootsIterator<'_, E, Hot, Cold> { /// (block_root, state_root, slot) type Item = Result<(Hash256, Hash256, Slot), Error>; @@ -295,8 +293,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for ParentRootBlockIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for ParentRootBlockIterator<'_, E, Hot, Cold> { type Item = Result<(Hash256, SignedBeaconBlock>), Error>; @@ -336,8 +334,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockIterator<'a, E, } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for BlockIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for BlockIterator<'_, E, Hot, Cold> { type Item = Result>, Error>; diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 1d02bfbb3c..09ae9a32dd 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -7,7 +7,6 @@ //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. 
-mod chunk_writer; pub mod chunked_iter; pub mod chunked_vector; pub mod config; @@ -15,25 +14,25 @@ pub mod consensus_context; pub mod errors; mod forwards_iter; mod garbage_collection; +pub mod hdiff; +pub mod historic_state_cache; pub mod hot_cold_store; mod impls; mod leveldb_store; mod memory_store; pub mod metadata; pub mod metrics; -mod partial_beacon_state; +pub mod partial_beacon_state; pub mod reconstruct; pub mod state_cache; pub mod iter; -pub use self::chunk_writer::ChunkWriter; pub use self::config::StoreConfig; pub use self::consensus_context::OnDiskConsensusContext; pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; -pub use self::partial_beacon_state::PartialBeaconState; pub use crate::metadata::BlobInfo; pub use errors::Error; pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; @@ -251,6 +250,11 @@ pub enum DBColumn { /// For data related to the database itself. #[strum(serialize = "bma")] BeaconMeta, + /// Data related to blocks. + /// + /// - Key: `Hash256` block root. + /// - Value in hot DB: SSZ-encoded blinded block. + /// - Value in cold DB: 8-byte slot of block. #[strum(serialize = "blk")] BeaconBlock, #[strum(serialize = "blb")] @@ -260,9 +264,21 @@ pub enum DBColumn { /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). #[strum(serialize = "ste")] BeaconState, - /// For the mapping from state roots to their slots or summaries. + /// For beacon state snapshots in the freezer DB. + #[strum(serialize = "bsn")] + BeaconStateSnapshot, + /// For compact `BeaconStateDiff`s in the freezer DB. + #[strum(serialize = "bsd")] + BeaconStateDiff, + /// Mapping from state root to `HotStateSummary` in the hot DB. + /// + /// Previously this column also served a role in the freezer DB, mapping state roots to + /// `ColdStateSummary`. However that role is now filled by `BeaconColdStateSummary`. 
#[strum(serialize = "bss")] BeaconStateSummary, + /// Mapping from state root to `ColdStateSummary` in the cold DB. + #[strum(serialize = "bcs")] + BeaconColdStateSummary, /// For the list of temporary states stored during block import, /// and then made non-temporary by the deletion of their state root from this column. #[strum(serialize = "bst")] @@ -281,22 +297,45 @@ pub enum DBColumn { ForkChoice, #[strum(serialize = "pkc")] PubkeyCache, - /// For the table mapping restore point numbers to state roots. + /// For the legacy table mapping restore point numbers to state roots. + /// + /// DEPRECATED. Can be removed once schema v22 is buried by a hard fork. #[strum(serialize = "brp")] BeaconRestorePoint, - #[strum(serialize = "bbr")] - BeaconBlockRoots, - #[strum(serialize = "bsr")] + /// Mapping from slot to beacon state root in the freezer DB. + /// + /// This new column was created to replace the previous `bsr` column. The replacement was + /// necessary to guarantee atomicity of the upgrade migration. + #[strum(serialize = "bsx")] BeaconStateRoots, + /// DEPRECATED. This is the previous column for beacon state roots stored by "chunk index". + /// + /// Can be removed once schema v22 is buried by a hard fork. + #[strum(serialize = "bsr")] + BeaconStateRootsChunked, + /// Mapping from slot to beacon block root in the freezer DB. + /// + /// This new column was created to replace the previous `bbr` column. The replacement was + /// necessary to guarantee atomicity of the upgrade migration. + #[strum(serialize = "bbx")] + BeaconBlockRoots, + /// DEPRECATED. This is the previous column for beacon block roots stored by "chunk index". + /// + /// Can be removed once schema v22 is buried by a hard fork. + #[strum(serialize = "bbr")] + BeaconBlockRootsChunked, + /// DEPRECATED. Can be removed once schema v22 is buried by a hard fork. #[strum(serialize = "bhr")] BeaconHistoricalRoots, + /// DEPRECATED. Can be removed once schema v22 is buried by a hard fork. 
#[strum(serialize = "brm")] BeaconRandaoMixes, #[strum(serialize = "dht")] DhtEnrs, - /// For Optimistically Imported Merge Transition Blocks + /// DEPRECATED. For Optimistically Imported Merge Transition Blocks #[strum(serialize = "otb")] OptimisticTransitionBlock, + /// DEPRECATED. Can be removed once schema v22 is buried by a hard fork. #[strum(serialize = "bhs")] BeaconHistoricalSummaries, #[strum(serialize = "olc")] @@ -338,6 +377,7 @@ impl DBColumn { | Self::BeaconState | Self::BeaconBlob | Self::BeaconStateSummary + | Self::BeaconColdStateSummary | Self::BeaconStateTemporary | Self::ExecPayload | Self::BeaconChain @@ -349,10 +389,14 @@ impl DBColumn { | Self::DhtEnrs | Self::OptimisticTransitionBlock => 32, Self::BeaconBlockRoots + | Self::BeaconBlockRootsChunked | Self::BeaconStateRoots + | Self::BeaconStateRootsChunked | Self::BeaconHistoricalRoots | Self::BeaconHistoricalSummaries | Self::BeaconRandaoMixes + | Self::BeaconStateSnapshot + | Self::BeaconStateDiff | Self::SyncCommittee | Self::SyncCommitteeBranch | Self::LightClientUpdate => 8, diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 0c93251fe2..3f076a767a 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(21); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(22); // All the keys that get stored under the `BeaconMeta` column. // @@ -21,6 +21,27 @@ pub const DATA_COLUMN_INFO_KEY: Hash256 = Hash256::repeat_byte(7); /// State upper limit value used to indicate that a node is not storing historic states. pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); +/// The `AnchorInfo` encoding full availability of all historic blocks & states. 
+pub const ANCHOR_FOR_ARCHIVE_NODE: AnchorInfo = AnchorInfo { + anchor_slot: Slot::new(0), + oldest_block_slot: Slot::new(0), + oldest_block_parent: Hash256::ZERO, + state_upper_limit: Slot::new(0), + state_lower_limit: Slot::new(0), +}; + +/// The `AnchorInfo` encoding an uninitialized anchor. +/// +/// This value should never exist except on initial start-up prior to the anchor being initialised +/// by `init_anchor_info`. +pub const ANCHOR_UNINITIALIZED: AnchorInfo = AnchorInfo { + anchor_slot: Slot::new(u64::MAX), + oldest_block_slot: Slot::new(u64::MAX), + oldest_block_parent: Hash256::ZERO, + state_upper_limit: Slot::new(u64::MAX), + state_lower_limit: Slot::new(0), +}; + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct SchemaVersion(pub u64); @@ -88,17 +109,47 @@ impl StoreItem for CompactionTimestamp { /// Database parameters relevant to weak subjectivity sync. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize)] pub struct AnchorInfo { - /// The slot at which the anchor state is present and which we cannot revert. + /// The slot at which the anchor state is present and which we cannot revert. Values on start: + /// - Genesis start: 0 + /// - Checkpoint sync: Slot of the finalized checkpoint block + /// + /// Immutable pub anchor_slot: Slot, - /// The slot from which historical blocks are available (>=). + /// All blocks with slots greater than or equal to this value are available in the database. + /// Additionally, the genesis block is always available. + /// + /// Values on start: + /// - Genesis start: 0 + /// - Checkpoint sync: Slot of the finalized checkpoint block + /// + /// Progressively decreases during backfill sync until reaching 0. pub oldest_block_slot: Slot, /// The block root of the next block that needs to be added to fill in the history. /// /// Zero if we know all blocks back to genesis. pub oldest_block_parent: Hash256, - /// The slot from which historical states are available (>=). 
+ /// All states with slots _greater than or equal to_ `min(split.slot, state_upper_limit)` are + /// available in the database. If `state_upper_limit` is higher than `split.slot`, states are + /// not being written to the freezer database. + /// + /// Values on start if state reconstruction is enabled: + /// - Genesis start: 0 + /// - Checkpoint sync: Slot of the next scheduled snapshot + /// + /// Value on start if state reconstruction is disabled: + /// - 2^64 - 1 representing no historic state storage. + /// + /// Immutable until state reconstruction completes. pub state_upper_limit: Slot, - /// The slot before which historical states are available (<=). + /// All states with slots _less than or equal to_ this value are available in the database. + /// The minimum value is 0, indicating that the genesis state is always available. + /// + /// Values on start: + /// - Genesis start: 0 + /// - Checkpoint sync: 0 + /// + /// When full block backfill completes (`oldest_block_slot == 0`) state reconstruction starts and + /// this value will progressively increase until reaching `state_upper_limit`. pub state_lower_limit: Slot, } @@ -109,6 +160,21 @@ impl AnchorInfo { pub fn block_backfill_complete(&self, target_slot: Slot) -> bool { self.oldest_block_slot <= target_slot } + + /// Return true if all historic states are stored, i.e. if state reconstruction is complete. + pub fn all_historic_states_stored(&self) -> bool { + self.state_lower_limit == self.state_upper_limit + } + + /// Return true if no historic states other than genesis are stored in the database. + pub fn no_historic_states_stored(&self, split_slot: Slot) -> bool { + self.state_lower_limit == 0 && self.state_upper_limit >= split_slot + } + + /// Return true if no historic states other than genesis *will ever be stored*. 
+ pub fn full_state_pruning_enabled(&self) -> bool { + self.state_lower_limit == 0 && self.state_upper_limit == STATE_UPPER_LIMIT_NO_RETAIN + } } impl StoreItem for AnchorInfo { diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 902c440be8..f0dd061790 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *}; +pub use metrics::{set_gauge, try_create_int_gauge, *}; use directory::size_of_dir; use std::path::Path; @@ -73,6 +73,27 @@ pub static DISK_DB_DELETE_COUNT: LazyLock> = LazyLock::new &["col"], ) }); +/* + * Anchor Info + */ +pub static STORE_BEACON_ANCHOR_SLOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "store_beacon_anchor_slot", + "Current anchor info anchor_slot value", + ) +}); +pub static STORE_BEACON_OLDEST_BLOCK_SLOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "store_beacon_oldest_block_slot", + "Current anchor info oldest_block_slot value", + ) +}); +pub static STORE_BEACON_STATE_LOWER_LIMIT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "store_beacon_state_lower_limit", + "Current anchor info state_lower_limit value", + ) +}); /* * Beacon State */ @@ -130,6 +151,24 @@ pub static BEACON_STATE_WRITE_BYTES: LazyLock> = LazyLock::ne "Total number of beacon state bytes written to the DB", ) }); +pub static BEACON_HDIFF_READ_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_hdiff_read_seconds", + "Time required to read the hierarchical diff bytes from the database", + ) +}); +pub static BEACON_HDIFF_DECODE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_hdiff_decode_seconds", + "Time required to decode hierarchical diff bytes", + ) +}); +pub static BEACON_HDIFF_BUFFER_CLONE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_hdiff_buffer_clone_seconds", + "Time required to clone hierarchical diff buffer bytes", + ) 
+}); /* * Beacon Block */ @@ -145,12 +184,181 @@ pub static BEACON_BLOCK_CACHE_HIT_COUNT: LazyLock> = LazyLock "Number of hits to the store's block cache", ) }); + +/* + * Caches + */ pub static BEACON_BLOBS_CACHE_HIT_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_counter( "store_beacon_blobs_cache_hit_total", "Number of hits to the store's blob cache", ) }); +pub static STORE_BEACON_BLOCK_CACHE_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "store_beacon_block_cache_size", + "Current count of items in beacon store block cache", + ) +}); +pub static STORE_BEACON_BLOB_CACHE_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "store_beacon_blob_cache_size", + "Current count of items in beacon store blob cache", + ) +}); +pub static STORE_BEACON_STATE_CACHE_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "store_beacon_state_cache_size", + "Current count of items in beacon store state cache", + ) +}); +pub static STORE_BEACON_HISTORIC_STATE_CACHE_SIZE: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "store_beacon_historic_state_cache_size", + "Current count of states in the historic state cache", + ) + }); +pub static STORE_BEACON_HDIFF_BUFFER_CACHE_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "store_beacon_hdiff_buffer_cache_size", + "Current count of hdiff buffers in the historic state cache", + ) +}); +pub static STORE_BEACON_HDIFF_BUFFER_CACHE_BYTE_SIZE: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "store_beacon_hdiff_buffer_cache_byte_size", + "Memory consumed by hdiff buffers in the historic state cache", + ) + }); +pub static STORE_BEACON_STATE_FREEZER_COMPRESS_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "store_beacon_state_compress_seconds", + "Time taken to compress a state snapshot for the freezer DB", + ) + }); +pub static STORE_BEACON_STATE_FREEZER_DECOMPRESS_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + 
"store_beacon_state_decompress_seconds", + "Time taken to decompress a state snapshot for the freezer DB", + ) + }); +pub static STORE_BEACON_HDIFF_BUFFER_APPLY_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "store_beacon_hdiff_buffer_apply_seconds", + "Time taken to apply hdiff buffer to a state buffer", + ) + }); +pub static STORE_BEACON_HDIFF_BUFFER_COMPUTE_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "store_beacon_hdiff_buffer_compute_seconds", + "Time taken to compute hdiff buffer to a state buffer", + ) + }); +pub static STORE_BEACON_HDIFF_BUFFER_LOAD_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_beacon_hdiff_buffer_load_seconds", + "Time taken to load an hdiff buffer", + ) +}); +pub static STORE_BEACON_HDIFF_BUFFER_LOAD_FOR_STORE_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "store_beacon_hdiff_buffer_load_for_store_seconds", + "Time taken to load an hdiff buffer to store another hdiff", + ) + }); +pub static STORE_BEACON_HISTORIC_STATE_CACHE_HIT: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "store_beacon_historic_state_cache_hit_total", + "Total count of historic state cache hits for full states", + ) + }); +pub static STORE_BEACON_HISTORIC_STATE_CACHE_MISS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "store_beacon_historic_state_cache_miss_total", + "Total count of historic state cache misses for full states", + ) + }); +pub static STORE_BEACON_HDIFF_BUFFER_CACHE_HIT: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "store_beacon_hdiff_buffer_cache_hit_total", + "Total count of hdiff buffer cache hits", + ) + }); +pub static STORE_BEACON_HDIFF_BUFFER_CACHE_MISS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "store_beacon_hdiff_buffer_cache_miss_total", + "Total count of hdiff buffer cache miss", + ) + }); +pub static STORE_BEACON_HDIFF_BUFFER_INTO_STATE_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + 
"store_beacon_hdiff_buffer_into_state_seconds", + "Time taken to recreate a BeaconState from an hdiff buffer", + ) + }); +pub static STORE_BEACON_HDIFF_BUFFER_FROM_STATE_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "store_beacon_hdiff_buffer_from_state_seconds", + "Time taken to create an hdiff buffer from a BeaconState", + ) + }); +pub static STORE_BEACON_REPLAYED_BLOCKS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "store_beacon_replayed_blocks_total", + "Total count of replayed blocks", + ) +}); +pub static STORE_BEACON_LOAD_COLD_BLOCKS_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_beacon_load_cold_blocks_time", + "Time spent loading blocks to replay for historic states", + ) +}); +pub static STORE_BEACON_LOAD_HOT_BLOCKS_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_beacon_load_hot_blocks_time", + "Time spent loading blocks to replay for hot states", + ) +}); +pub static STORE_BEACON_REPLAY_COLD_BLOCKS_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "store_beacon_replay_cold_blocks_time", + "Time spent replaying blocks for historic states", + ) + }); +pub static STORE_BEACON_COLD_BUILD_BEACON_CACHES_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "store_beacon_cold_build_beacon_caches_time", + "Time spent building caches on historic states", + ) + }); +pub static STORE_BEACON_REPLAY_HOT_BLOCKS_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_beacon_replay_hot_blocks_time", + "Time spent replaying blocks for hot states", + ) +}); +pub static STORE_BEACON_RECONSTRUCTION_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_beacon_reconstruction_time_seconds", + "Time taken to run a reconstruct historic states batch", + ) +}); pub static BEACON_DATA_COLUMNS_CACHE_HIT_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_counter( diff --git a/beacon_node/store/src/partial_beacon_state.rs 
b/beacon_node/store/src/partial_beacon_state.rs index 8a66ec121e..22eecdcc60 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -1,18 +1,20 @@ use crate::chunked_vector::{ - load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, - HistoricalSummaries, RandaoMixes, StateRoots, + load_variable_list_from_db, load_vector_from_db, BlockRootsChunked, HistoricalRoots, + HistoricalSummaries, RandaoMixes, StateRootsChunked, }; -use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp}; -use ssz::{Decode, DecodeError, Encode}; +use crate::{Error, KeyValueStore}; +use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; use types::historical_summary::HistoricalSummary; use types::superstruct; use types::*; -/// Lightweight variant of the `BeaconState` that is stored in the database. +/// DEPRECATED Lightweight variant of the `BeaconState` that is stored in the database. /// /// Utilises lazy-loading from separate storage for its vector fields. +/// +/// This can be deleted once schema versions prior to V22 are no longer supported. #[superstruct( variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) @@ -134,7 +136,7 @@ where pub earliest_consolidation_epoch: Epoch, #[superstruct(only(Electra))] - pub pending_balance_deposits: List, + pub pending_deposits: List, #[superstruct(only(Electra))] pub pending_partial_withdrawals: List, @@ -142,163 +144,7 @@ where pub pending_consolidations: List, } -/// Implement the conversion function from BeaconState -> PartialBeaconState. -macro_rules! 
impl_from_state_forgetful { - ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_fields_opt:ident),*]) => { - PartialBeaconState::$variant_name($struct_name { - // Versioning - genesis_time: $s.genesis_time, - genesis_validators_root: $s.genesis_validators_root, - slot: $s.slot, - fork: $s.fork, - - // History - latest_block_header: $s.latest_block_header.clone(), - block_roots: None, - state_roots: None, - historical_roots: None, - - // Eth1 - eth1_data: $s.eth1_data.clone(), - eth1_data_votes: $s.eth1_data_votes.clone(), - eth1_deposit_index: $s.eth1_deposit_index, - - // Validator registry - validators: $s.validators.clone(), - balances: $s.balances.clone(), - - // Shuffling - latest_randao_value: *$outer - .get_randao_mix($outer.current_epoch()) - .expect("randao at current epoch is OK"), - randao_mixes: None, - - // Slashings - slashings: $s.slashings.clone(), - - // Finality - justification_bits: $s.justification_bits.clone(), - previous_justified_checkpoint: $s.previous_justified_checkpoint, - current_justified_checkpoint: $s.current_justified_checkpoint, - finalized_checkpoint: $s.finalized_checkpoint, - - // Variant-specific fields - $( - $extra_fields: $s.$extra_fields.clone() - ),*, - - // Variant-specific optional - $( - $extra_fields_opt: None - ),* - }) - } -} - impl PartialBeaconState { - /// Convert a `BeaconState` to a `PartialBeaconState`, while dropping the optional fields. 
- pub fn from_state_forgetful(outer: &BeaconState) -> Self { - match outer { - BeaconState::Base(s) => impl_from_state_forgetful!( - s, - outer, - Base, - PartialBeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations], - [] - ), - BeaconState::Altair(s) => impl_from_state_forgetful!( - s, - outer, - Altair, - PartialBeaconStateAltair, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores - ], - [] - ), - BeaconState::Bellatrix(s) => impl_from_state_forgetful!( - s, - outer, - Bellatrix, - PartialBeaconStateBellatrix, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ], - [] - ), - BeaconState::Capella(s) => impl_from_state_forgetful!( - s, - outer, - Capella, - PartialBeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index - ], - [historical_summaries] - ), - BeaconState::Deneb(s) => impl_from_state_forgetful!( - s, - outer, - Deneb, - PartialBeaconStateDeneb, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index - ], - [historical_summaries] - ), - BeaconState::Electra(s) => impl_from_state_forgetful!( - s, - outer, - Electra, - PartialBeaconStateElectra, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index, - deposit_requests_start_index, - deposit_balance_to_consume, - exit_balance_to_consume, - 
earliest_exit_epoch, - consolidation_balance_to_consume, - earliest_consolidation_epoch, - pending_balance_deposits, - pending_partial_withdrawals, - pending_consolidations - ], - [historical_summaries] - ), - } - } - /// SSZ decode. pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). @@ -321,19 +167,13 @@ impl PartialBeaconState { )) } - /// Prepare the partial state for storage in the KV database. - pub fn as_kv_store_op(&self, state_root: Hash256) -> KeyValueStoreOp { - let db_key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); - KeyValueStoreOp::PutKeyValue(db_key, self.as_ssz_bytes()) - } - pub fn load_block_roots>( &mut self, store: &S, spec: &ChainSpec, ) -> Result<(), Error> { if self.block_roots().is_none() { - *self.block_roots_mut() = Some(load_vector_from_db::( + *self.block_roots_mut() = Some(load_vector_from_db::( store, self.slot(), spec, @@ -348,7 +188,7 @@ impl PartialBeaconState { spec: &ChainSpec, ) -> Result<(), Error> { if self.state_roots().is_none() { - *self.state_roots_mut() = Some(load_vector_from_db::( + *self.state_roots_mut() = Some(load_vector_from_db::( store, self.slot(), spec, @@ -563,7 +403,7 @@ impl TryInto> for PartialBeaconState { earliest_exit_epoch, consolidation_balance_to_consume, earliest_consolidation_epoch, - pending_balance_deposits, + pending_deposits, pending_partial_withdrawals, pending_consolidations ], diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 8ef4886565..9bec83a35c 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -1,14 +1,16 @@ //! Implementation of historic state reconstruction (given complete block history). 
use crate::hot_cold_store::{HotColdDB, HotColdDBError}; +use crate::metadata::ANCHOR_FOR_ARCHIVE_NODE; +use crate::metrics; use crate::{Error, ItemStore}; use itertools::{process_results, Itertools}; -use slog::info; +use slog::{debug, info}; use state_processing::{ per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use std::sync::Arc; -use types::{EthSpec, Hash256}; +use types::EthSpec; impl HotColdDB where @@ -16,11 +18,16 @@ where Hot: ItemStore, Cold: ItemStore, { - pub fn reconstruct_historic_states(self: &Arc) -> Result<(), Error> { - let Some(mut anchor) = self.get_anchor_info() else { - // Nothing to do, history is complete. + pub fn reconstruct_historic_states( + self: &Arc, + num_blocks: Option, + ) -> Result<(), Error> { + let mut anchor = self.get_anchor_info(); + + // Nothing to do, history is complete. + if anchor.all_historic_states_stored() { return Ok(()); - }; + } // Check that all historic blocks are known. if anchor.oldest_block_slot != 0 { @@ -29,37 +36,30 @@ where }); } - info!( + debug!( self.log, - "Beginning historic state reconstruction"; + "Starting state reconstruction batch"; "start_slot" => anchor.state_lower_limit, ); - let slots_per_restore_point = self.config.slots_per_restore_point; + let _t = metrics::start_timer(&metrics::STORE_BEACON_RECONSTRUCTION_TIME); // Iterate blocks from the state lower limit to the upper limit. - let lower_limit_slot = anchor.state_lower_limit; let split = self.get_split_info(); - let upper_limit_state = self.get_restore_point( - anchor.state_upper_limit.as_u64() / slots_per_restore_point, - &split, - )?; - let upper_limit_slot = upper_limit_state.slot(); + let lower_limit_slot = anchor.state_lower_limit; + let upper_limit_slot = std::cmp::min(split.slot, anchor.state_upper_limit); - // Use a dummy root, as we never read the block for the upper limit state. 
- let upper_limit_block_root = Hash256::repeat_byte(0xff); - - let block_root_iter = self.forwards_block_roots_iterator( - lower_limit_slot, - upper_limit_state, - upper_limit_block_root, - &self.spec, - )?; + // If `num_blocks` is not specified iterate all blocks. Add 1 so that we end on an epoch + // boundary when `num_blocks` is a multiple of an epoch boundary. We want to be *inclusive* + // of the state at slot `lower_limit_slot + num_blocks`. + let block_root_iter = self + .forwards_block_roots_iterator_until(lower_limit_slot, upper_limit_slot - 1, || { + Err(Error::StateShouldNotBeRequired(upper_limit_slot - 1)) + })? + .take(num_blocks.map_or(usize::MAX, |n| n + 1)); // The state to be advanced. - let mut state = self - .load_cold_state_by_slot(lower_limit_slot)? - .ok_or(HotColdDBError::MissingLowerLimitState(lower_limit_slot))?; + let mut state = self.load_cold_state_by_slot(lower_limit_slot)?; state.build_caches(&self.spec)?; @@ -110,8 +110,19 @@ where // Stage state for storage in freezer DB. self.store_cold_state(&state_root, &state, &mut io_batch)?; - // If the slot lies on an epoch boundary, commit the batch and update the anchor. - if slot % slots_per_restore_point == 0 || slot + 1 == upper_limit_slot { + let batch_complete = + num_blocks.map_or(false, |n_blocks| slot == lower_limit_slot + n_blocks as u64); + let reconstruction_complete = slot + 1 == upper_limit_slot; + + // Commit the I/O batch if: + // + // - The diff/snapshot for this slot is required for future slots, or + // - The reconstruction batch is complete (we are about to return), or + // - Reconstruction is complete. + if self.hierarchy.should_commit_immediately(slot)? + || batch_complete + || reconstruction_complete + { info!( self.log, "State reconstruction in progress"; @@ -122,9 +133,9 @@ where self.cold_db.do_atomically(std::mem::take(&mut io_batch))?; // Update anchor. 
- let old_anchor = Some(anchor.clone()); + let old_anchor = anchor.clone(); - if slot + 1 == upper_limit_slot { + if reconstruction_complete { // The two limits have met in the middle! We're done! // Perform one last integrity check on the state reached. let computed_state_root = state.update_tree_hash_cache()?; @@ -136,23 +147,36 @@ where }); } - self.compare_and_set_anchor_info_with_write(old_anchor, None)?; + self.compare_and_set_anchor_info_with_write( + old_anchor, + ANCHOR_FOR_ARCHIVE_NODE, + )?; return Ok(()); } else { // The lower limit has been raised, store it. anchor.state_lower_limit = slot; - self.compare_and_set_anchor_info_with_write( - old_anchor, - Some(anchor.clone()), - )?; + self.compare_and_set_anchor_info_with_write(old_anchor, anchor.clone())?; + } + + // If this is the end of the batch, return Ok. The caller will run another + // batch when there is idle capacity. + if batch_complete { + debug!( + self.log, + "Finished state reconstruction batch"; + "start_slot" => lower_limit_slot, + "end_slot" => slot, + ); + return Ok(()); } } } - // Should always reach the `upper_limit_slot` and return early above. - Err(Error::StateReconstructionDidNotComplete) + // Should always reach the `upper_limit_slot` or the end of the batch and return early + // above. + Err(Error::StateReconstructionLogicError) })??; // Check that the split point wasn't mutated during the state reconstruction process. 
diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index 4be6536df9..0738b12ec0 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -26,7 +26,6 @@ fn build_node(env: &mut Environment) -> LocalBeaconNode { fn http_server_genesis_state() { let mut env = env_builder() .test_logger() - //.async_logger("debug", None) .expect("should build env logger") .multi_threaded_tokio_runtime() .expect("should start tokio runtime") diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index afb93f3657..546cc2ed41 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } [dependencies] beacon_chain = { workspace = true } -slot_clock = { workspace = true } -tokio = { workspace = true } slog = { workspace = true } +slot_clock = { workspace = true } task_executor = { workspace = true } +tokio = { workspace = true } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 86c97af0da..44d7702e5f 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -15,6 +15,7 @@ * [The `validator-manager` Command](./validator-manager.md) * [Creating validators](./validator-manager-create.md) * [Moving validators](./validator-manager-move.md) + * [Managing validators](./validator-manager-api.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) * [Partial Withdrawals](./partial-withdrawal.md) @@ -32,9 +33,8 @@ * [Signature Header](./api-vc-sig-header.md) * [Prometheus Metrics](./advanced_metrics.md) * [Lighthouse UI (Siren)](./lighthouse-ui.md) - * [Installation](./ui-installation.md) - * [Authentication](./ui-authentication.md) * [Configuration](./ui-configuration.md) + * [Authentication](./ui-authentication.md) * [Usage](./ui-usage.md) * [FAQs](./ui-faqs.md) * [Advanced Usage](./advanced.md) @@ -65,3 +65,4 @@ * [Development Environment](./setup.md) * [FAQs](./faq.md) * [Protocol Developers](./developers.md) +* [Security 
Researchers](./security.md) diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 345fff6981..b558279730 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -7,59 +7,70 @@ the _freezer_ or _cold DB_, and the portion storing recent states as the _hot DB In both the hot and cold DBs, full `BeaconState` data structures are only stored periodically, and intermediate states are reconstructed by quickly replaying blocks on top of the nearest state. For example, to fetch a state at slot 7 the database might fetch a full state from slot 0, and replay -blocks from slots 1-7 while omitting redundant signature checks and Merkle root calculations. The -full states upon which blocks are replayed are referred to as _restore points_ in the case of the +blocks from slots 1-7 while omitting redundant signature checks and Merkle root calculations. In +the freezer DB, Lighthouse also uses hierarchical state diffs to jump larger distances (described in +more detail below). + +The full states upon which blocks are replayed are referred to as _snapshots_ in the case of the freezer DB, and _epoch boundary states_ in the case of the hot DB. The frequency at which the hot database stores full `BeaconState`s is fixed to one-state-per-epoch in order to keep loads of recent states performant. For the freezer DB, the frequency is -configurable via the `--slots-per-restore-point` CLI flag, which is the topic of the next section. +configurable via the `--hierarchy-exponents` CLI flag, which is the topic of the next section. -## Freezer DB Space-time Trade-offs +## Hierarchical State Diffs -Frequent restore points use more disk space but accelerate the loading of historical states. -Conversely, infrequent restore points use much less space, but cause the loading of historical -states to slow down dramatically. 
A lower _slots per restore point_ value (SPRP) corresponds to more -frequent restore points, while a higher SPRP corresponds to less frequent. The table below shows -some example values. +Since v6.0.0, Lighthouse's freezer database uses _hierarchical state diffs_ or _hdiffs_ for short. +These diffs allow Lighthouse to reconstruct any historic state relatively quickly from a very +compact database. The essence of the hdiffs is that full states (snapshots) are stored only around +once per year. To reconstruct a particular state, Lighthouse fetches the last snapshot prior to that +state, and then applies several _layers_ of diffs. For example, to access a state from November +2022, we might fetch the yearly snapshot for the start of 2022, then apply a monthly diff to jump to +November, and then more granular diffs to reach the particular week, day and epoch desired. +Usually for the last stretch between the start of the epoch and the state requested, some blocks +will be _replayed_. -| Use Case | SPRP | Yearly Disk Usage*| Load Historical State | -|----------------------------|------|-------------------|-----------------------| -| Research | 32 | more than 10 TB | 155 ms | -| Enthusiast (prev. default) | 2048 | hundreds of GB | 10.2 s | -| Validator only (default) | 8192 | tens of GB | 41 s | +The following diagram shows part of the layout of diffs in the default configuration. There is a +full snapshot stored every `2^21` slots. In the next layer there are diffs every `2^18` slots which +approximately correspond to "monthly" diffs. Following this are more granular diffs every `2^16` +slots, every `2^13` slots, and so on down to the per-epoch diffs every `2^5` slots. -*Last update: Dec 2023. +![Tree diagram displaying hierarchical state diffs](./imgs/db-freezer-layout.png) -As we can see, it's a high-stakes trade-off! The relationships to disk usage and historical state -load time are both linear – doubling SPRP halves disk usage and doubles load time. 
The minimum SPRP -is 32, and the maximum is 8192. +The number of layers and frequency of diffs is configurable via the `--hierarchy-exponents` flag, +which has a default value of `5,9,11,13,16,18,21`. The hierarchy exponents must be provided in order +from smallest to largest. The smallest exponent determines the frequency of the "closest" layer +of diffs, with the default value of 5 corresponding to a diff every `2^5` slots (every epoch). +The largest number determines the frequency of full snapshots, with the default value of 21 +corresponding to a snapshot every `2^21` slots (every 291 days). -The default value is 8192 for databases synced from scratch using Lighthouse v2.2.0 or later, or -2048 for prior versions. Please see the section on [Defaults](#defaults) below. +The number of possible `--hierarchy-exponents` configurations is extremely large and our exploration +of possible configurations is still in its relative infancy. If you experiment with non-default +values of `--hierarchy-exponents` we would be interested to hear how it goes. A few rules of thumb +that we have observed are: -The values shown in the table are approximate, calculated using a simple heuristic: each -`BeaconState` consumes around 145MB of disk space, and each block replayed takes around 5ms. The -**Yearly Disk Usage** column shows the approximate size of the freezer DB _alone_ (hot DB not included), calculated proportionally using the total freezer database disk usage. -The **Load Historical State** time is the worst-case load time for a state in the last slot -before a restore point. +- **More frequent snapshots = more space**. This is quite intuitive - if you store full states more + often then these will take up more space than diffs. However what you lose in space efficiency you + may gain in speed. 
It would be possible to achieve a configuration similar to Lighthouse's + previous `--slots-per-restore-point 32` using `--hierarchy-exponents 5`, although this would use + _a lot_ of space. It's even possible to push beyond that with `--hierarchy-exponents 0` which + would store a full state every single slot (NOT RECOMMENDED). +- **Fewer diff layers are not necessarily faster**. One might expect that the fewer diff layers there + are, the less work Lighthouse would have to do to reconstruct any particular state. In practice + this seems to be offset by the increased size of diffs in each layer making the diffs take longer + to apply. We observed no significant performance benefit from `--hierarchy-exponents 5,7,11`, and + a substantial increase in space consumed. -To run a full archival node with fast access to beacon states and a SPRP of 32, the disk usage will be more than 10 TB per year, which is impractical for many users. As such, users may consider running the [tree-states](https://github.com/sigp/lighthouse/releases/tag/v5.0.111-exp) release, which only uses less than 200 GB for a full archival node. The caveat is that it is currently experimental and in alpha release (as of Dec 2023), thus not recommended for running mainnet validators. Nevertheless, it is suitable to be used for analysis purposes, and if you encounter any issues in tree-states, we do appreciate any feedback. We plan to have a stable release of tree-states in 1H 2024. - -### Defaults - -As of Lighthouse v2.2.0, the default slots-per-restore-point value has been increased from 2048 -to 8192 in order to conserve disk space. Existing nodes will continue to use SPRP=2048 unless -re-synced. Note that it is currently not possible to change the SPRP without re-syncing, although -fast re-syncing may be achieved with [Checkpoint Sync](./checkpoint-sync.md). +If in doubt, we recommend running with the default configuration!
It takes a long time to +reconstruct states in any given configuration, so it might be some time before the optimal +configuration is determined. ### CLI Configuration -To configure your Lighthouse node's database with a non-default SPRP, run your Beacon Node with -the `--slots-per-restore-point` flag: +To configure your Lighthouse node's database, run your beacon node with the `--hierarchy-exponents` flag: ```bash -lighthouse beacon_node --slots-per-restore-point 32 +lighthouse beacon_node --hierarchy-exponents "5,7,11" ``` ### Historic state cache @@ -72,17 +83,20 @@ The historical state cache size can be specified with the flag `--historic-state lighthouse beacon_node --historic-state-cache-size 4 ``` -> Note: This feature will cause high memory usage. +> Note: Using a large cache limit can lead to high memory usage. ## Glossary -* _Freezer DB_: part of the database storing finalized states. States are stored in a sparser +- _Freezer DB_: part of the database storing finalized states. States are stored in a sparser format, and usually less frequently than in the hot DB. -* _Cold DB_: see _Freezer DB_. -* _Hot DB_: part of the database storing recent states, all blocks, and other runtime data. Full +- _Cold DB_: see _Freezer DB_. +- _HDiff_: hierarchical state diff. +- _Hierarchy Exponents_: configuration for hierarchical state diffs, which determines the density + of stored diffs and snapshots in the freezer DB. +- _Hot DB_: part of the database storing recent states, all blocks, and other runtime data. Full states are stored every epoch. -* _Restore Point_: a full `BeaconState` stored periodically in the freezer DB. -* _Slots Per Restore Point (SPRP)_: the number of slots between restore points in the freezer DB. -* _Split Slot_: the slot at which states are divided between the hot and the cold DBs. All states +- _Snapshot_: a full `BeaconState` stored periodically in the freezer DB. Approximately yearly by + default (every ~291 days).
+- _Split Slot_: the slot at which states are divided between the hot and the cold DBs. All states from slots less than the split slot are in the freezer, while all states with slots greater than or equal to the split slot are in the hot DB. diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 732b4f51e6..c0f6b5485e 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -68,7 +68,7 @@ The steps to do port forwarding depends on the router, but the general steps are 1. Determine the default gateway IP: - On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. - - On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. + - On macOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. - On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. The default gateway IP usually looks like 192.168.X.X. Once you obtain the IP, enter it to a web browser and it will lead you to the router management page. @@ -91,7 +91,7 @@ The steps to do port forwarding depends on the router, but the general steps are - Internal port: `9001` - IP address: Choose the device that is running Lighthouse. -1. To check that you have successfully opened the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. 
You will need to ensure you have correctly setup port forwarding for the UDP ports (`9000` and `9001` by default). +1. To check that you have successfully opened the ports, go to [`yougetsignal`](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly set up port forwarding for the UDP ports (`9000` and `9001` by default). ## ENR Configuration @@ -141,7 +141,7 @@ To listen over both IPv4 and IPv6: - Set two listening addresses using the `--listen-address` flag twice ensuring the two addresses are one IPv4, and the other IPv6. When doing so, the `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note - that this behaviour differs from the Ipv6 only case described above. + that this behaviour differs from the IPv6 only case described above. - If necessary, set the `--port6` flag to configure the port used for TCP and UDP over IPv6. This flag has no effect when listening over IPv6 only.
- If necessary, set the `--discovery-port6` flag to configure the IPv6 UDP diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index b63505c490..5428ab8f9a 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -508,23 +508,31 @@ curl "http://localhost:5052/lighthouse/database/info" | jq ```json { - "schema_version": 18, + "schema_version": 22, "config": { - "slots_per_restore_point": 8192, - "slots_per_restore_point_set_explicitly": false, "block_cache_size": 5, + "state_cache_size": 128, + "compression_level": 1, "historic_state_cache_size": 1, + "hdiff_buffer_cache_size": 16, "compact_on_init": false, "compact_on_prune": true, "prune_payloads": true, + "hierarchy_config": { + "exponents": [ + 5, + 7, + 11 + ] + }, "prune_blobs": true, "epochs_per_blob_prune": 1, "blob_prune_margin_epochs": 0 }, "split": { - "slot": "7454656", - "state_root": "0xbecfb1c8ee209854c611ebc967daa77da25b27f1a8ef51402fdbe060587d7653", - "block_root": "0x8730e946901b0a406313d36b3363a1b7091604e1346a3410c1a7edce93239a68" + "slot": "10530592", + "state_root": "0xd27e6ce699637cf9b5c7ca632118b7ce12c2f5070bb25a27ac353ff2799d4466", + "block_root": "0x71509a1cb374773d680cd77148c73ab3563526dacb0ab837bb0c87e686962eae" }, "anchor": { "anchor_slot": "7451168", @@ -543,8 +551,19 @@ curl "http://localhost:5052/lighthouse/database/info" | jq For more information about the split point, see the [Database Configuration](./advanced_database.md) docs. -The `anchor` will be `null` unless the node has been synced with checkpoint sync and state -reconstruction has yet to be completed. For more information +For archive nodes, the `anchor` will be: + +```json +"anchor": { + "anchor_slot": "0", + "oldest_block_slot": "0", + "oldest_block_parent": "0x0000000000000000000000000000000000000000000000000000000000000000", + "state_upper_limit": "0", + "state_lower_limit": "0" + }, +``` + +indicating that all states with slots `>= 0` are available, i.e., full state history. 
For more information on the specific meanings of these fields see the docs on [Checkpoint Sync](./checkpoint-sync.md#reconstructing-states). diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index adde78270a..f792ee870e 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -18,12 +18,13 @@ Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ## Obtaining the API token The API token is stored as a file in the `validators` directory. For most users -this is `~/.lighthouse/{network}/validators/api-token.txt`. Here's an -example using the `cat` command to print the token to the terminal, but any +this is `~/.lighthouse/{network}/validators/api-token.txt`, unless overridden using the +`--http-token-path` CLI parameter. Here's an +example using the `cat` command to print the token for mainnet to the terminal, but any text editor will suffice: ```bash -cat api-token.txt +cat ~/.lighthouse/mainnet/validators/api-token.txt hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 80eba7a059..98605a3dcd 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -53,7 +53,7 @@ Example Response Body: } ``` -> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. +> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. 
If your database is saved in another directory, modify the `DATADIR` accordingly. If you've specified a custom token path using `--http-token-path`, use that path instead. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. > As an alternative, you can also provide the API token directly, for example, `-H "Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh`. In this case, you obtain the token from the file `api-token.txt` and the command becomes: diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 6d75b90100..a9bfb00ccd 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -16,6 +16,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| +| v6.0.0 | Nov 2024 | v22 | no | | v5.3.0 | Aug 2024 | v21 | yes | | v5.2.0 | Jun 2024 | v19 | no | | v5.1.0 | Mar 2024 | v19 | no | @@ -208,6 +209,7 @@ Here are the steps to prune historic states: | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|-------------------------------------| +| v6.0.0 | Nov 2024 | v22 | no | | v5.3.0 | Aug 2024 | v21 | yes | | v5.2.0 | Jun 2024 | v19 | yes before Deneb using <= v5.2.1 | | v5.1.0 | Mar 2024 | v19 | yes before Deneb using <= v5.2.1 | diff --git a/book/src/faq.md b/book/src/faq.md index 04e5ce5bc8..d23951c8c7 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -92,7 +92,7 @@ If the reason for the error message is caused by no. 1 above, you may want to lo - Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. - The service file is not stopped properly. 
To overcome this, make sure that the process is stopped properly, e.g., during client updates. -- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer. +- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using Geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer. ### I see beacon logs showing `Error during execution engine upcheck`, what should I do? @@ -302,7 +302,7 @@ An example of the log: (debug logs can be found under `$datadir/beacon/logs`): Delayed head block, set_as_head_time_ms: 27, imported_time_ms: 168, attestable_delay_ms: 4209, available_delay_ms: 4186, execution_time_ms: 201, blob_delay_ms: 3815, observed_delay_ms: 3984, total_delay_ms: 4381, slot: 1886014, proposer_index: 733, block_root: 0xa7390baac88d50f1cbb5ad81691915f6402385a12521a670bbbd4cd5f8bf3934, service: beacon, module: beacon_chain::canonical_head:1441 ``` -The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s which has past the window of attestation, the attestation wil fail. 
In the above example, the delay is mostly caused by late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late. +The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s which has passed the window of attestation, the attestation will fail. In the above example, the delay is mostly caused by late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late. Another example of log: @@ -315,7 +315,7 @@ In this example, we see that the `execution_time_ms` is 4694ms. The `execution_t ### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? -In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone elses performance. +In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes.
Your attestation performance does not only depend on your own setup, but also on everyone else's performance. You could also check for the sync aggregate participation percentage on block explorers such as [beaconcha.in](https://beaconcha.in/). A low sync aggregate participation percentage (e.g., 60-70%) indicates that the block that you are assigned to attest to may be published late. As a result, your validator fails to correctly attest to the block. diff --git a/book/src/graffiti.md b/book/src/graffiti.md index ba9c7d05d7..7b402ea866 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -4,7 +4,7 @@ Lighthouse provides four options for setting validator graffiti. ## 1. Using the "--graffiti-file" flag on the validator client -Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded everytime a validator is chosen to propose a block. +Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded every time a validator is chosen to propose a block. Usage: `lighthouse vc --graffiti-file graffiti_file.txt` diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 69701a3ad9..a4ab44748c 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -5,7 +5,7 @@ The primary component which connects to the Ethereum 2.0 P2P network and downloads, verifies and stores blocks. Provides a HTTP API for querying the beacon chain and publishing messages to the network. -Usage: lighthouse beacon_node [OPTIONS] +Usage: lighthouse beacon_node [OPTIONS] --execution-endpoint Options: --auto-compact-db @@ -166,9 +166,23 @@ Options: --graffiti Specify your custom graffiti to be included in blocks. 
Defaults to the current version and commit, truncated to fit in 32 bytes. + --hdiff-buffer-cache-size + Number of hierarchical diff (hdiff) buffers to cache in memory. Each + buffer is around the size of a BeaconState so you should be cautious + about setting this value too high. This flag is irrelevant for most + nodes, which run with state pruning enabled. [default: 16] + --hierarchy-exponents + Specifies the frequency for storing full state snapshots and + hierarchical diffs in the freezer DB. Accepts a comma-separated list + of ascending exponents. Each exponent defines an interval for storing + diffs to the layer above. The last exponent defines the interval for + full snapshots. For example, a config of '4,8,12' would store a full + snapshot every 4096 (2^12) slots, first-level diffs every 256 (2^8) + slots, and second-level diffs every 16 (2^4) slots. Cannot be changed + after initialization. [default: 5,9,11,13,16,18,21] --historic-state-cache-size - Specifies how many states from the freezer database should cache in - memory [default: 1] + Specifies how many states from the freezer database should be cached + in memory [default: 1] --http-address
Set the listen address for the RESTful HTTP API server. --http-allow-origin @@ -364,9 +378,7 @@ Options: --slasher-validator-chunk-size Number of validators per chunk stored on disk. --slots-per-restore-point - Specifies how often a freezer DB restore point should be stored. - Cannot be changed after initialization. [default: 8192 (mainnet) or 64 - (minimal)] + DEPRECATED. This flag has no effect. --state-cache-size Specifies the size of the state cache [default: 128] --suggested-fee-recipient @@ -468,9 +480,6 @@ Flags: --disable-upnp Disables UPnP support. Setting this will prevent Lighthouse from attempting to automatically establish external port mappings. - --dummy-eth1 - If present, uses an eth1 backend that generates static dummy - data.Identical to the method used at the 2019 Canada interop. -e, --enr-match Sets the local ENR IP address and port to match those set for lighthouse. Specifically, the IP address will be the value of @@ -478,10 +487,6 @@ Flags: --enable-private-discovery Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses. - --eth1 - If present the node will connect to an eth1 node. This is required for - block production, you must use this flag if you wish to serve a - validator. --eth1-purge-cache Purges the eth1 block and deposit caches --genesis-backfill @@ -549,8 +554,7 @@ Flags: --staking Standard option for a staking beacon node. This will enable the HTTP server on localhost:5052 and import deposit logs from the execution - node. This is equivalent to `--http` on merge-ready networks, or - `--http --eth1` pre-merge + node. --stdin-inputs If present, read all user inputs from stdin instead of tty. --subscribe-all-subnets diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 2cfbfbc857..71e21d68c9 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -69,6 +69,10 @@ Options: this server (e.g., http://localhost:5062). 
--http-port Set the listen TCP port for the RESTful HTTP API server. + --http-token-path + Path to file containing the HTTP API token for validator client + authentication. If not specified, defaults to + {validators-dir}/api-token.txt. --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] diff --git a/book/src/help_vm.md b/book/src/help_vm.md index 9b6c5d4f3b..50c204f371 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -23,6 +23,11 @@ Commands: "create-validators" command. This command only supports validators signing via a keystore on the local file system (i.e., not Web3Signer validators). + list + Lists all validators in a validator client using the HTTP API. + delete + Deletes one or more validators from a validator client using the HTTP + API. help Print this message or the help of the given subcommand(s) diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index b4999d3fe3..68aab768ae 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -5,9 +5,17 @@ Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be generated using the "create-validators" command. -Usage: lighthouse validator_manager import [OPTIONS] --validators-file +Usage: lighthouse validator_manager import [OPTIONS] Options: + --builder-boost-factor + When provided, the imported validator will use this percentage + multiplier to apply to the builder's payload value when choosing + between a builder payload header and payload from the local execution + node. + --builder-proposals + When provided, the imported validator will attempt to create blocks + via builder rather than the local EL. [possible values: true, false] -d, --datadir Used to specify a custom root data directory for lighthouse keys and databases. 
Defaults to $HOME/.lighthouse/{network} where network is @@ -17,6 +25,10 @@ Options: Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: info, debug, trace, warn, error, crit] + --gas-limit + When provided, the imported validator will use this gas limit. It is + recommended to leave this as the default value by not specifying this + flag. --genesis-state-url A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server URLs can generally be used with @@ -26,6 +38,10 @@ Options: --genesis-state-url-timeout The timeout in seconds for the request to --genesis-state-url. [default: 180] + --keystore-file + The path to a keystore JSON file to be imported to the validator + client. This file is usually created using staking-deposit-cli or + ethstaker-deposit-cli --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] @@ -50,6 +66,15 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] + --password + Password of the keystore file. + --prefer-builder-proposals + When provided, the imported validator will always prefer blocks + constructed by builders, regardless of payload value. [possible + values: true, false] + --suggested-fee-recipient + When provided, the imported validator will use the suggested fee + recipient. Omit this flag to use the default value from the VC. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing @@ -60,10 +85,8 @@ Options: --vc-token The file containing a token required by the validator client. --vc-url - A HTTP(S) address of a validator client using the keymanager-API. If - this value is not supplied then a 'dry run' will be conducted where no - changes are made to the validator client. 
[default: - http://localhost:5062] + A HTTP(S) address of a validator client using the keymanager-API. + [default: http://localhost:5062] Flags: --disable-log-timestamp diff --git a/book/src/homebrew.md b/book/src/homebrew.md index da92dcb26c..f94764889e 100644 --- a/book/src/homebrew.md +++ b/book/src/homebrew.md @@ -31,6 +31,6 @@ Alternatively, you can find the `lighthouse` binary at: The [formula][] is kept up-to-date by the Homebrew community and a bot that lists for new releases. -The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repo. +The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repository. [formula]: https://formulae.brew.sh/formula/lighthouse diff --git a/book/src/imgs/db-freezer-layout.png b/book/src/imgs/db-freezer-layout.png new file mode 100644 index 0000000000..1870eb4267 Binary files /dev/null and b/book/src/imgs/db-freezer-layout.png differ diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index 4a00f33aa4..fca156bda3 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -46,24 +46,31 @@ You can track the reasons for re-orgs being attempted (or not) via Lighthouse's A pair of messages at `INFO` level will be logged if a re-org opportunity is detected: -> INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 - -> INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 +```text +INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 
0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 +``` This should be followed shortly after by a `INFO` log indicating that a re-org occurred. This is expected and normal: -> INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +```text +INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +``` In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a block as normal and log the reason the re-org was not attempted at debug level: -> DEBG Not attempting re-org reason: head not late +```text +DEBG Not attempting re-org reason: head not late +``` If you are interested in digging into the timing of `forkchoiceUpdated` messages sent to the execution layer, there is also a debug log for the suppression of `forkchoiceUpdated` messages when Lighthouse thinks that a re-org is likely: -> DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +```text +DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +``` [the spec]: https://github.com/ethereum/consensus-specs/pull/3034 diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md index 106a5e8947..f2662f4a69 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/lighthouse-ui.md @@ 
-21,7 +21,6 @@ The UI is currently in active development. It resides in the See the following Siren specific topics for more context-specific information: -- [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. - [Configuration Guide](./ui-configuration.md) - Explanation of how to setup and configure Siren. - [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. diff --git a/book/src/resources/2020-lh-trail-of-bits.pdf b/book/src/resources/2020-lh-trail-of-bits.pdf new file mode 100644 index 0000000000..162bef53f0 Binary files /dev/null and b/book/src/resources/2020-lh-trail-of-bits.pdf differ diff --git a/book/src/security.md b/book/src/security.md new file mode 100644 index 0000000000..0af57db7f9 --- /dev/null +++ b/book/src/security.md @@ -0,0 +1,11 @@ +# Security + +Lighthouse takes security seriously. Please see our security policy on GitHub for our PGP key and information on reporting vulnerabilities: + +- [GitHub: Security Policy](https://github.com/sigp/lighthouse/blob/stable/SECURITY.md) + +## Past Security Assessments + +Reports from previous security assessments can be found below: + +- [December 2020 - Trail of Bits](./resources/2020-lh-trail-of-bits.pdf) diff --git a/book/src/ui-authentication.md b/book/src/ui-authentication.md index 9e3a94db78..81b867bae2 100644 --- a/book/src/ui-authentication.md +++ b/book/src/ui-authentication.md @@ -2,12 +2,12 @@ ## Siren Session -For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of user validators. The session password must be set during the [installation](./ui-installation.md) process before running the Docker or local build, either in an `.env` file or via Docker flags. 
+For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of the user's validators. The session password must be set during the [configuration](./ui-configuration.md) process before running the Docker or local build, either in an `.env` file or via Docker flags. ![exit](imgs/ui-session.png) ## Protected Actions -Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password please refer to the Siren [installation process](./ui-installation.md). +Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password please refer to the Siren [configuration process](./ui-configuration.md). ![exit](imgs/ui-auth.png) diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md index eeb2c9a51c..34cc9fe7ca 100644 --- a/book/src/ui-configuration.md +++ b/book/src/ui-configuration.md @@ -1,37 +1,116 @@ -# Configuration +# 📦 Installation + +Siren supports any operating system that supports containers and/or NodeJS 18, this includes Linux, MacOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren). + +## Version Requirement + +To ensure proper functionality, the Siren app requires Lighthouse v4.3.0 or higher. You can find these versions on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. + +## Configuration Siren requires a connection to both a Lighthouse Validator Client and a Lighthouse Beacon Node. -To enable connection, you must generate .env file based on the provided .env.example - -## Connecting to the Clients Both the Beacon node and the Validator client need to have their HTTP APIs enabled. 
-These ports should be accessible from Siren. +These ports should be accessible from Siren. This means adding the flag `--http` on both beacon node and validator client. To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This action ensures that the HTTP API can be accessed by other software on the same machine. > The Beacon Node must be run with the `--gui` flag set. -If you require accessibility from another machine within the network, configure the `--http-address` to match the local LAN IP of the system running the Beacon Node and Validator Client. +## Running the Docker container (Recommended) -> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. +We recommend running Siren's container next to your beacon node (on the same server), as it's essentially a webapp that you can access with any browser. -In a similar manner, the validator client requires activation of the `--http` flag, along with the optional consideration of configuring the `--http-address` flag. If `--http-address` flag is set on the Validator Client, then the `--unencrypted-http-transport` flag is required as well. These settings will ensure compatibility with Siren's connectivity requirements. + 1. Create a directory to run Siren: -If you run the Docker container, it will fail to startup if your BN/VC are not accessible, or if you provided a wrong API token. + ```bash + cd ~ + mkdir Siren + cd Siren + ``` -## API Token + 1. Create a configuration file in the `Siren` directory: `nano .env` and insert the following fields to the `.env` file. The field values are given here as an example, modify the fields as necessary. 
For example, the `API_TOKEN` can be obtained from [`Validator Client Authorization Header`](./api-vc-auth-header.md) -The API Token is a secret key that allows you to connect to the validator -client. The validator client's HTTP API is guarded by this key because it -contains sensitive validator information and the ability to modify -validators. Please see [`Validator Authorization`](./api-vc-auth-header.md) -for further details. + A full example with all possible configuration options can be found [here](https://github.com/sigp/siren/blob/stable/.env.example). -Siren requires this token in order to connect to the Validator client. -The token is located in the default data directory of the validator -client. The default path is -`~/.lighthouse//validators/api-token.txt`. + ``` + BEACON_URL=http://localhost:5052 + VALIDATOR_URL=http://localhost:5062 + API_TOKEN=R6YhbDO6gKjNMydtZHcaCovFbQ0izq5Hk + SESSION_PASSWORD=your_password + ``` -The contents of this file for the desired validator client needs to be -entered. + 1. You can now start Siren with: + + ```bash + docker run --rm -ti --name siren --env-file $PWD/.env --net host sigp/siren + ``` + + Note that, due to the `--net=host` flag, this will expose Siren on ports 3000, 80, and 443. Preferably, only the latter should be accessible. Adjust your firewall and/or skip the flag wherever possible. + + If it fails to start, an error message will be shown. For example, the error + + ``` + http://localhost:5062 unreachable, check settings and connection + ``` + + means that the validator client is not running, or the `--http` flag is not provided, or otherwise inaccessible from within the container. Another common error is: + + ``` + validator api issue, server response: 403 + ``` + + which means that the API token is incorrect. Check that you have provided the correct token in the field `API_TOKEN` in `.env`. 
+
+    When Siren has successfully started, you should see the log `LOG [NestApplication] Nest application successfully started +118ms`, indicating that Siren has started.
+
+ 1. Siren is now accessible at `https://` (when used with `--net=host`). You will get a warning about an invalid certificate, this can be safely ignored.
+
+    > Note: We recommend setting a strong password when running Siren to protect it from unauthorized access.
+
+Advanced users can mount their own certificates or disable SSL altogether, see the `SSL Certificates` section below.
+
+## Building From Source
+
+### Docker
+
+The docker image can be built with the following command:
+`docker build -f Dockerfile -t siren .`
+
+### Building locally
+
+To build from source, ensure that your system has `Node v18.18` and `yarn` installed.
+
+#### Build and run the backend
+
+Navigate to the backend directory `cd backend`. Install all required Node packages by running `yarn`. Once the installation is complete, compile the backend with `yarn build`. Deploy the backend in a production environment, `yarn start:production`. This ensures optimal performance.
+
+#### Build and run the frontend
+
+After initializing the backend, return to the root directory. Install all frontend dependencies by executing `yarn`. Build the frontend using `yarn build`. Start the frontend production server with `yarn start`.
+
+This will allow you to access Siren at `http://localhost:3000` by default.
+
+## Advanced configuration
+
+### About self-signed SSL certificates
+
+By default, internally, Siren is running on port 80 (plain, behind nginx), port 3000 (plain, direct) and port 443 (with SSL, behind nginx). Siren will generate and use a self-signed certificate on startup. This will generate a security warning when you try to access the interface. We recommend to only disable SSL if you would access Siren over a local LAN or otherwise highly trusted or encrypted network (i.e. VPN).
+
+#### Generating persistent SSL certificates and installing them to your system
+
+[mkcert](https://github.com/FiloSottile/mkcert) is a tool that makes it super easy to generate a self-signed certificate that is trusted by your browser.
+
+To use it for `siren`, install it following the instructions. Then, run `mkdir certs; mkcert -cert-file certs/cert.pem -key-file certs/key.pem 127.0.0.1 localhost` (add or replace any IP or hostname that you would use to access it at the end of this command).
+To use these generated certificates, add this to your `docker run` command: `-v $PWD/certs:/certs`
+
+The nginx SSL config inside Siren's container expects 3 files: `/certs/cert.pem` `/certs/key.pem` `/certs/key.pass`. If `/certs/cert.pem` does not exist, it will generate a self-signed certificate as mentioned above. If `/certs/cert.pem` does exist, it will attempt to use your provided or persisted certificates.
+
+### Configuration through environment variables
+
+For those who prefer to use environment variables to configure Siren instead of using an `.env` file, this is fully supported. In some cases this may even be preferred.
+
+#### Docker installed through `snap`
+
+If you installed Docker through a snap (i.e. on Ubuntu), Docker will have trouble accessing the `.env` file. In this case it is highly recommended to pass the config to the container with environment variables.
+Note that the defaults in `.env.example` will be used as fallback, if no other value is provided.
diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md
index efa6d3d4ab..29de889e5f 100644
--- a/book/src/ui-faqs.md
+++ b/book/src/ui-faqs.md
@@ -6,19 +6,20 @@ Yes, the most current Siren version requires Lighthouse v4.3.0 or higher to func
 
 ## 2. Where can I find my API token?
 
-The required Api token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md).
+The required API token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). ## 3. How do I fix the Node Network Errors? -If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). +If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui [`configuration`](./ui-configuration.md#configuration). ## 4. How do I connect Siren to Lighthouse from a different computer on the same network? -Siren is a webapp, you can access it like any other website. We don't recommend exposing it to the internet; if you require remote access a VPN or (authenticated) reverse proxy is highly recommended. +Siren is a webapp, you can access it like any other website. We don't recommend exposing it to the internet; if you require remote access a VPN or (authenticated) reverse proxy is highly recommended. +That being said, it is entirely possible to have it published over the internet, how to do that goes well beyond the scope of this document but we want to emphasize once more the need for *at least* SSL encryption if you choose to do so. ## 5. How can I use Siren to monitor my validators remotely when I am not at home? -Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. 
The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). +Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`configuration`](./ui-configuration.md#configuration). ## 6. Does Siren support reverse proxy or DNS named addresses? diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md index 1444c0d633..9cd84e5160 100644 --- a/book/src/ui-installation.md +++ b/book/src/ui-installation.md @@ -1,6 +1,6 @@ # 📦 Installation -Siren supports any operating system that supports container runtimes and/or NodeJS 18, this includes Linux, MacOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren) , but running the application directly is also possible. +Siren supports any operating system that supports containers and/or NodeJS 18, this includes Linux, macOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren) , but running the application directly is also possible. ## Version Requirement diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index 092c813a1e..eef563dcdb 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -56,7 +56,6 @@ The following fields are returned: able to vote) during the current epoch. 
- `current_epoch_target_attesting_gwei`: the total staked gwei that attested to the majority-elected Casper FFG target epoch during the current epoch. -- `previous_epoch_active_gwei`: as per `current_epoch_active_gwei`, but during the previous epoch. - `previous_epoch_target_attesting_gwei`: see `current_epoch_target_attesting_gwei`. - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a head beacon block that is in the canonical chain. diff --git a/book/src/validator-manager-api.md b/book/src/validator-manager-api.md new file mode 100644 index 0000000000..a5fc69fd5a --- /dev/null +++ b/book/src/validator-manager-api.md @@ -0,0 +1,39 @@ +# Managing Validators + +The `lighthouse validator-manager` uses the [Keymanager API](https://ethereum.github.io/keymanager-APIs/#/) to list, import and delete keystores via the HTTP API. This requires the validator client running with the flag `--http`. + +## Delete + +The `delete` command deletes one or more validators from the validator client. It will also modify the `validator_definitions.yml` file automatically so there is no manual action required from the user after the delete. To `delete`: + +```bash +lighthouse vm delete --vc-token --validators pubkey1,pubkey2 +``` + +Example: + +```bash +lighthouse vm delete --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators 0x8885c29b8f88ee9b9a37b480fd4384fed74bda33d85bc8171a904847e65688b6c9bb4362d6597fd30109fb2def6c3ae4,0xa262dae3dcd2b2e280af534effa16bedb27c06f2959e114d53bd2a248ca324a018dc73179899a066149471a94a1bc92f +``` + +## Import + +The `import` command imports validator keystores generated by the staking-deposit-cli/ethstaker-deposit-cli. 
To import a validator keystore: + +```bash +lighthouse vm import --vc-token --keystore-file /path/to/json --password keystore_password +``` + +Example: + +``` +lighthouse vm import --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --keystore-file keystore.json --password keystore_password +``` + +## List + +To list the validators running on the validator client: + +```bash +lighthouse vm list --vc-token ~/.lighthouse/mainnet/validators/api-token.txt +``` diff --git a/book/src/validator-manager-create.md b/book/src/validator-manager-create.md index d97f953fc1..b4c86dc6da 100644 --- a/book/src/validator-manager-create.md +++ b/book/src/validator-manager-create.md @@ -69,6 +69,8 @@ lighthouse \ > Be sure to remove `./validators.json` after the import is successful since it > contains unencrypted validator keystores. +> Note: To import validators with validator-manager using keystore files created using the staking deposit CLI, refer to [Managing Validators](./validator-manager-api.md#import). + ## Detailed Guide This guide will create two validators and import them to a VC. 
For simplicity, diff --git a/book/src/validator-manager.md b/book/src/validator-manager.md index a71fab1e3a..11df2af037 100644 --- a/book/src/validator-manager.md +++ b/book/src/validator-manager.md @@ -32,3 +32,4 @@ The `validator-manager` boasts the following features: - [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md) - [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) +- [Managing validators such as delete, import and list validators.](./validator-manager-api.md) diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 6439ea83a3..bbc95460ec 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -134,7 +134,7 @@ validator_monitor_attestation_simulator_source_attester_hit_total validator_monitor_attestation_simulator_source_attester_miss_total ``` -A grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). +A Grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). The attestation simulator provides an insight into the attestation performance of a beacon node. It can be used as an indication of how expediently the beacon node has completed importing blocks within the 4s time frame for an attestation to be made. 
diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 76d41ae11a..7c8d2b16fd 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,24 +1,24 @@ [package] name = "boot_node" -version = "5.3.0" +version = "6.0.1" authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] beacon_node = { workspace = true } +bytes = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -lighthouse_network = { workspace = true } -types = { workspace = true } +eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } -slog = { workspace = true } -tokio = { workspace = true } +hex = { workspace = true } +lighthouse_network = { workspace = true } log = { workspace = true } -slog-term = { workspace = true } logging = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } slog-async = { workspace = true } slog-scope = "4.3.0" -hex = { workspace = true } -serde = { workspace = true } -eth2_network_config = { workspace = true } -bytes = { workspace = true } +slog-term = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 00738462e0..96032dddcc 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -136,8 +136,8 @@ pub async fn run( "active_sessions" => metrics.active_sessions, "requests/s" => format_args!("{:.2}", metrics.unsolicited_requests_per_second), "ipv4_nodes" => ipv4_only_reachable, - "ipv6_nodes" => ipv6_only_reachable, - "ipv6_and_ipv4_nodes" => ipv4_ipv6_reachable, + "ipv6_only_nodes" => ipv6_only_reachable, + "dual_stack_nodes" => ipv4_ipv6_reachable, "unreachable_nodes" => unreachable_nodes, ); diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index e66bf14233..dece975d37 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -3,20 +3,19 @@ name = "account_utils" version = "0.1.0" authors = ["Paul 
Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = { workspace = true } -eth2_wallet = { workspace = true } +directory = { workspace = true } eth2_keystore = { workspace = true } +eth2_wallet = { workspace = true } filesystem = { workspace = true } -zeroize = { workspace = true } +rand = { workspace = true } +regex = { workspace = true } +rpassword = "5.0.0" serde = { workspace = true } serde_yaml = { workspace = true } slog = { workspace = true } types = { workspace = true } validator_dir = { workspace = true } -regex = { workspace = true } -rpassword = "5.0.0" -directory = { workspace = true } +zeroize = { workspace = true } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 2c8bbbf4b4..0f576efb3a 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -8,7 +8,6 @@ use eth2_wallet::{ }; use filesystem::{create_with_600_perms, Error as FsError}; use rand::{distributions::Alphanumeric, Rng}; -use serde::{Deserialize, Serialize}; use std::fs::{self, File}; use std::io; use std::io::prelude::*; @@ -16,7 +15,7 @@ use std::path::{Path, PathBuf}; use std::str::from_utf8; use std::thread::sleep; use std::time::Duration; -use zeroize::Zeroize; +use zeroize::Zeroizing; pub mod validator_definitions; @@ -66,8 +65,8 @@ pub fn read_password>(path: P) -> Result { fs::read(path).map(strip_off_newlines).map(Into::into) } -/// Reads a password file into a `ZeroizeString` struct, with new-lines removed. -pub fn read_password_string>(path: P) -> Result { +/// Reads a password file into a `Zeroizing` struct, with new-lines removed. 
+pub fn read_password_string>(path: P) -> Result, String> { fs::read(path) .map_err(|e| format!("Error opening file: {:?}", e)) .map(strip_off_newlines) @@ -109,8 +108,8 @@ pub fn random_password() -> PlainText { random_password_raw_string().into_bytes().into() } -/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN` as `ZeroizeString`. -pub fn random_password_string() -> ZeroizeString { +/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN` as `Zeroizing`. +pub fn random_password_string() -> Zeroizing { random_password_raw_string().into() } @@ -138,7 +137,7 @@ pub fn strip_off_newlines(mut bytes: Vec) -> Vec { } /// Reads a password from TTY or stdin if `use_stdin == true`. -pub fn read_password_from_user(use_stdin: bool) -> Result { +pub fn read_password_from_user(use_stdin: bool) -> Result, String> { let result = if use_stdin { rpassword::prompt_password_stderr("") .map_err(|e| format!("Error reading from stdin: {}", e)) @@ -147,7 +146,7 @@ pub fn read_password_from_user(use_stdin: bool) -> Result .map_err(|e| format!("Error reading from tty: {}", e)) }; - result.map(ZeroizeString::from) + result.map(Zeroizing::from) } /// Reads a mnemonic phrase from TTY or stdin if `use_stdin == true`. @@ -207,38 +206,6 @@ pub fn mnemonic_from_phrase(phrase: &str) -> Result { Mnemonic::from_phrase(phrase, Language::English).map_err(|e| e.to_string()) } -/// Provides a new-type wrapper around `String` that is zeroized on `Drop`. -/// -/// Useful for ensuring that password memory is zeroed-out on drop. -#[derive(Clone, PartialEq, Serialize, Deserialize, Zeroize)] -#[zeroize(drop)] -#[serde(transparent)] -pub struct ZeroizeString(String); - -impl From for ZeroizeString { - fn from(s: String) -> Self { - Self(s) - } -} - -impl ZeroizeString { - pub fn as_str(&self) -> &str { - &self.0 - } - - /// Remove any number of newline or carriage returns from the end of a vector of bytes. 
- pub fn without_newlines(&self) -> ZeroizeString { - let stripped_string = self.0.trim_end_matches(['\r', '\n']).into(); - Self(stripped_string) - } -} - -impl AsRef<[u8]> for ZeroizeString { - fn as_ref(&self) -> &[u8] { - self.0.as_bytes() - } -} - pub fn read_mnemonic_from_cli( mnemonic_path: Option, stdin_inputs: bool, @@ -283,54 +250,6 @@ pub fn read_mnemonic_from_cli( mod test { use super::*; - #[test] - fn test_zeroize_strip_off() { - let expected = "hello world"; - - assert_eq!( - ZeroizeString::from("hello world\n".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\n\n\n\n".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\r".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\r\r\r\r\r".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\r\n".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\r\n\r\n".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world".to_string()) - .without_newlines() - .as_str(), - expected - ); - } - #[test] fn test_strip_off() { let expected = b"hello world".to_vec(); diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index f228ce5fdf..a4850fc1c6 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -3,9 +3,7 @@ //! Serves as the source-of-truth of which validators this validator client should attempt (or not //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. 
-use crate::{ - default_keystore_password_path, read_password_string, write_file_via_temporary, ZeroizeString, -}; +use crate::{default_keystore_password_path, read_password_string, write_file_via_temporary}; use directory::ensure_dir_exists; use eth2_keystore::Keystore; use regex::Regex; @@ -17,6 +15,7 @@ use std::io; use std::path::{Path, PathBuf}; use types::{graffiti::GraffitiString, Address, PublicKey}; use validator_dir::VOTING_KEYSTORE_FILE; +use zeroize::Zeroizing; /// The file name for the serialized `ValidatorDefinitions` struct. pub const CONFIG_FILENAME: &str = "validator_definitions.yml"; @@ -52,7 +51,7 @@ pub enum Error { /// Defines how a password for a validator keystore will be persisted. pub enum PasswordStorage { /// Store the password in the `validator_definitions.yml` file. - ValidatorDefinitions(ZeroizeString), + ValidatorDefinitions(Zeroizing), /// Store the password in a separate, dedicated file (likely in the "secrets" directory). File(PathBuf), /// Don't store the password at all. @@ -93,7 +92,7 @@ pub enum SigningDefinition { #[serde(skip_serializing_if = "Option::is_none")] voting_keystore_password_path: Option, #[serde(skip_serializing_if = "Option::is_none")] - voting_keystore_password: Option, + voting_keystore_password: Option>, }, /// A validator that defers to a Web3Signer HTTP server for signing. /// @@ -107,7 +106,7 @@ impl SigningDefinition { matches!(self, SigningDefinition::LocalKeystore { .. 
}) } - pub fn voting_keystore_password(&self) -> Result, Error> { + pub fn voting_keystore_password(&self) -> Result>, Error> { match self { SigningDefinition::LocalKeystore { voting_keystore_password: Some(password), diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 73823ae24e..f3c166bda9 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -3,16 +3,15 @@ name = "clap_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] alloy-primitives = { workspace = true } clap = { workspace = true } -hex = { workspace = true } dirs = { workspace = true } eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } +hex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/common/compare_fields_derive/Cargo.toml b/common/compare_fields_derive/Cargo.toml index b4bbbaa436..19682bf367 100644 --- a/common/compare_fields_derive/Cargo.toml +++ b/common/compare_fields_derive/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } proc-macro = true [dependencies] -syn = { workspace = true } quote = { workspace = true } +syn = { workspace = true } diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index a03ac2178f..953fde1af7 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -7,13 +7,13 @@ edition = { workspace = true } build = "build.rs" [build-dependencies] +hex = { workspace = true } reqwest = { workspace = true } serde_json = { workspace = true } sha2 = { workspace = true } -hex = { workspace = true } [dependencies] -types = { workspace = true } +ethabi = "16.0.0" ethereum_ssz = { workspace = true } tree_hash = { workspace = true } -ethabi = "16.0.0" +types = { workspace = true } diff --git 
a/common/directory/Cargo.toml b/common/directory/Cargo.toml index f724337261..9c3ced9097 100644 --- a/common/directory/Cargo.toml +++ b/common/directory/Cargo.toml @@ -3,7 +3,6 @@ name = "directory" version = "0.1.0" authors = ["pawan "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index d23a4068f1..9d6dea100d 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -3,33 +3,30 @@ name = "eth2" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -serde = { workspace = true } -serde_json = { workspace = true } -ssz_types = { workspace = true } -types = { workspace = true } -reqwest = { workspace = true } -lighthouse_network = { workspace = true } -proto_array = { workspace = true } -ethereum_serde_utils = { workspace = true } +derivative = { workspace = true } eth2_keystore = { workspace = true } -libsecp256k1 = { workspace = true } -ring = { workspace = true } -bytes = { workspace = true } -account_utils = { workspace = true } -sensitive_url = { workspace = true } +ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -futures-util = "0.3.8" futures = { workspace = true } -store = { workspace = true } -slashing_protection = { workspace = true } +futures-util = "0.3.8" +lighthouse_network = { workspace = true } mediatype = "0.19.13" pretty_reqwest_error = { workspace = true } -derivative = { workspace = true } +proto_array = { workspace = true } +reqwest = { workspace = true } +reqwest-eventsource = "0.5.0" +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +slashing_protection = { workspace = true } +ssz_types = { workspace = true } +store = 
{ workspace = true } +types = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] tokio = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 522c6414ea..12b1538984 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -27,6 +27,7 @@ use reqwest::{ Body, IntoUrl, RequestBuilder, Response, }; pub use reqwest::{StatusCode, Url}; +use reqwest_eventsource::{Event, EventSource}; pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; use ssz::Encode; @@ -52,6 +53,8 @@ pub const SSZ_CONTENT_TYPE_HEADER: &str = "application/octet-stream"; pub enum Error { /// The `reqwest` client raised an error. HttpClient(PrettyReqwestError), + /// The `reqwest_eventsource` client raised an error. + SseClient(reqwest_eventsource::Error), /// The server returned an error message where the body was able to be parsed. ServerMessage(ErrorMessage), /// The server returned an error message with an array of errors. @@ -93,6 +96,13 @@ impl Error { pub fn status(&self) -> Option { match self { Error::HttpClient(error) => error.inner().status(), + Error::SseClient(error) => { + if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error { + Some(*status) + } else { + None + } + } Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::StatusCode(status) => Some(*status), @@ -2592,16 +2602,29 @@ impl BeaconNodeHttpClient { .join(","); path.query_pairs_mut().append_pair("topics", &topic_string); - Ok(self - .client - .get(path) - .send() - .await? - .bytes_stream() - .map(|next| match next { - Ok(bytes) => EventKind::from_sse_bytes(bytes.as_ref()), - Err(e) => Err(Error::HttpClient(e.into())), - })) + let mut es = EventSource::get(path); + // If we don't await `Event::Open` here, then the consumer + // will not get any Message events until they start awaiting the stream. 
+ // This is a way to register the stream with the sse server before + // message events start getting emitted. + while let Some(event) = es.next().await { + match event { + Ok(Event::Open) => break, + Err(err) => return Err(Error::SseClient(err)), + // This should never happen as we are guaranteed to get the + // Open event before any message starts coming through. + Ok(Event::Message(_)) => continue, + } + } + Ok(Box::pin(es.filter_map(|event| async move { + match event { + Ok(Event::Open) => None, + Ok(Event::Message(message)) => { + Some(EventKind::from_sse_bytes(&message.event, &message.data)) + } + Err(err) => Some(Err(Error::SseClient(err))), + } + }))) } /// `POST validator/duties/sync/{epoch}` diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index e978d92245..66dd5d779b 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -361,7 +361,7 @@ pub struct DatabaseInfo { pub schema_version: u64, pub config: StoreConfig, pub split: Split, - pub anchor: Option, + pub anchor: AnchorInfo, pub blob_info: BlobInfo, } @@ -528,9 +528,9 @@ impl BeaconNodeHttpClient { self.post_with_response(path, &()).await } - /// - /// Analysis endpoints. - /// + /* + Analysis endpoints. 
+ */ /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot pub async fn get_lighthouse_analysis_block_rewards( diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 67fe77a315..1d1abcac79 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -1,6 +1,5 @@ use super::types::*; use crate::Error; -use account_utils::ZeroizeString; use reqwest::{ header::{HeaderMap, HeaderValue}, IntoUrl, @@ -14,6 +13,7 @@ use std::path::Path; pub use reqwest; pub use reqwest::{Response, StatusCode, Url}; use types::graffiti::GraffitiString; +use zeroize::Zeroizing; /// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a /// Lighthouse Validator Client HTTP server (`validator_client/src/http_api`). @@ -21,7 +21,7 @@ use types::graffiti::GraffitiString; pub struct ValidatorClientHttpClient { client: reqwest::Client, server: SensitiveUrl, - api_token: Option, + api_token: Option>, authorization_header: AuthorizationHeader, } @@ -79,18 +79,18 @@ impl ValidatorClientHttpClient { } /// Get a reference to this client's API token, if any. - pub fn api_token(&self) -> Option<&ZeroizeString> { + pub fn api_token(&self) -> Option<&Zeroizing> { self.api_token.as_ref() } /// Read an API token from the specified `path`, stripping any trailing whitespace. - pub fn load_api_token_from_file(path: &Path) -> Result { + pub fn load_api_token_from_file(path: &Path) -> Result, Error> { let token = fs::read_to_string(path).map_err(|e| Error::TokenReadError(path.into(), e))?; - Ok(ZeroizeString::from(token.trim_end().to_string())) + Ok(token.trim_end().to_string().into()) } /// Add an authentication token to use when making requests. 
- pub fn add_auth_token(&mut self, token: ZeroizeString) -> Result<(), Error> { + pub fn add_auth_token(&mut self, token: Zeroizing) -> Result<(), Error> { self.api_token = Some(token); self.authorization_header = AuthorizationHeader::Bearer; diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index ee05c29839..ae192312bd 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -1,7 +1,7 @@ -use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; use types::{Address, Graffiti, PublicKeyBytes}; +use zeroize::Zeroizing; pub use slashing_protection::interchange::Interchange; @@ -41,7 +41,7 @@ pub struct SingleKeystoreResponse { #[serde(deny_unknown_fields)] pub struct ImportKeystoresRequest { pub keystores: Vec, - pub passwords: Vec, + pub passwords: Vec>, pub slashing_protection: Option, } diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 1921549bcb..d7d5a00df5 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -1,13 +1,12 @@ -use account_utils::ZeroizeString; +pub use crate::lighthouse::Health; +pub use crate::lighthouse_vc::std_types::*; +pub use crate::types::{GenericResponse, VersionData}; use eth2_keystore::Keystore; use graffiti::GraffitiString; use serde::{Deserialize, Serialize}; use std::path::PathBuf; - -pub use crate::lighthouse::Health; -pub use crate::lighthouse_vc::std_types::*; -pub use crate::types::{GenericResponse, VersionData}; pub use types::*; +use zeroize::Zeroizing; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorData { @@ -44,7 +43,7 @@ pub struct ValidatorRequest { #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct CreateValidatorsMnemonicRequest { - pub mnemonic: ZeroizeString, + pub mnemonic: Zeroizing, #[serde(with = "serde_utils::quoted_u32")] pub 
key_derivation_path_offset: u32, pub validators: Vec, @@ -74,7 +73,7 @@ pub struct CreatedValidator { #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct PostValidatorsResponseData { - pub mnemonic: ZeroizeString, + pub mnemonic: Zeroizing, pub validators: Vec, } @@ -102,7 +101,7 @@ pub struct ValidatorPatchRequest { #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct KeystoreValidatorsPostRequest { - pub password: ZeroizeString, + pub password: Zeroizing, pub enable: bool, pub keystore: Keystore, #[serde(default)] @@ -191,7 +190,7 @@ pub struct SingleExportKeystoresResponse { #[serde(skip_serializing_if = "Option::is_none")] pub validating_keystore: Option, #[serde(skip_serializing_if = "Option::is_none")] - pub validating_keystore_password: Option, + pub validating_keystore_password: Option>, } #[derive(Serialize, Deserialize, Debug)] diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index c187399ebd..a303953a86 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -13,7 +13,7 @@ use serde_json::Value; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; use std::fmt::{self, Display}; -use std::str::{from_utf8, FromStr}; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use types::beacon_block_body::KzgCommitments; @@ -1153,24 +1153,7 @@ impl EventKind { } } - pub fn from_sse_bytes(message: &[u8]) -> Result { - let s = from_utf8(message) - .map_err(|e| ServerError::InvalidServerSentEvent(format!("{:?}", e)))?; - - let mut split = s.split('\n'); - let event = split - .next() - .ok_or_else(|| { - ServerError::InvalidServerSentEvent("Could not parse event tag".to_string()) - })? - .trim_start_matches("event:"); - let data = split - .next() - .ok_or_else(|| { - ServerError::InvalidServerSentEvent("Could not parse data tag".to_string()) - })? 
- .trim_start_matches("data:"); - + pub fn from_sse_bytes(event: &str, data: &str) -> Result { match event { "attestation" => Ok(EventKind::Attestation(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Attestation: {:?}", e)), diff --git a/common/eth2_config/Cargo.toml b/common/eth2_config/Cargo.toml index 20c3b0b6f2..509f5ff87e 100644 --- a/common/eth2_config/Cargo.toml +++ b/common/eth2_config/Cargo.toml @@ -5,5 +5,5 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -types = { workspace = true } paste = { workspace = true } +types = { workspace = true } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index cd5d7a8bd4..50386feb8a 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -32,6 +32,7 @@ const HOLESKY_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url ], checksum: "0xd750639607c337bbb192b15c27f447732267bf72d1650180a0e44c2d93a80741", genesis_validators_root: "0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1", + genesis_state_root: "0x0ea3f6f9515823b59c863454675fefcd1d8b4f2dbe454db166206a41fda060a0", }; const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { @@ -39,6 +40,7 @@ const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url urls: &[], checksum: "0xd4a039454c7429f1dfaa7e11e397ef3d0f50d2d5e4c0e4dc04919d153aa13af1", genesis_validators_root: "0x9d642dac73058fbf39c0ae41ab1e34e4d889043cb199851ded7095bc99eb4c1e", + genesis_state_root: "0xa48419160f8f146ecaa53d12a5d6e1e6af414a328afdc56b60d5002bb472a077", }; /// The core configuration of a Lighthouse beacon node. @@ -100,6 +102,10 @@ pub enum GenesisStateSource { /// /// The format should be 0x-prefixed ASCII bytes. genesis_validators_root: &'static str, + /// The genesis state root. + /// + /// The format should be 0x-prefixed ASCII bytes. 
+ genesis_state_root: &'static str, }, } @@ -114,7 +120,7 @@ pub struct Eth2NetArchiveAndDirectory<'a> { pub genesis_state_source: GenesisStateSource, } -impl<'a> Eth2NetArchiveAndDirectory<'a> { +impl Eth2NetArchiveAndDirectory<'_> { /// The directory that should be used to store files downloaded for this net. pub fn dir(&self) -> PathBuf { env::var("CARGO_MANIFEST_DIR") diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 5971b934e0..c19b32014e 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -3,16 +3,15 @@ name = "eth2_interop_keypairs" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -num-bigint = "0.4.2" +bls = { workspace = true } ethereum_hashing = { workspace = true } hex = { workspace = true } -serde_yaml = { workspace = true } +num-bigint = "0.4.2" serde = { workspace = true } -bls = { workspace = true } +serde_yaml = { workspace = true } [dev-dependencies] base64 = "0.13.0" diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 09cf2072d2..a255e04229 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -7,25 +7,25 @@ edition = { workspace = true } build = "build.rs" [build-dependencies] -zip = { workspace = true } eth2_config = { workspace = true } +zip = { workspace = true } [dev-dependencies] +ethereum_ssz = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } -ethereum_ssz = { workspace = true } [dependencies] -serde_yaml = { workspace = true } -types = { workspace = true } -eth2_config = { workspace = true } -discv5 = { workspace = true } -reqwest = { workspace = true } -pretty_reqwest_error = { workspace = true } -sha2 = { workspace = true } -url = { workspace = true } -sensitive_url = { workspace 
= true } -slog = { workspace = true } -logging = { workspace = true } bytes = { workspace = true } +discv5 = { workspace = true } +eth2_config = { workspace = true } kzg = { workspace = true } +logging = { workspace = true } +pretty_reqwest_error = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } +serde_yaml = { workspace = true } +sha2 = { workspace = true } +slog = { workspace = true } +types = { workspace = true } +url = { workspace = true } diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 3d0ffc5b9e..5d5a50574b 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -154,6 +154,32 @@ impl Eth2NetworkConfig { } } + /// Get the genesis state root for this network. + /// + /// `Ok(None)` will be returned if the genesis state is not known. No network requests will be + /// made by this function. This function will not error unless the genesis state configuration + /// is corrupted. + pub fn genesis_state_root(&self) -> Result, String> { + match self.genesis_state_source { + GenesisStateSource::Unknown => Ok(None), + GenesisStateSource::Url { + genesis_state_root, .. + } => Hash256::from_str(genesis_state_root) + .map(Option::Some) + .map_err(|e| format!("Unable to parse genesis state root: {:?}", e)), + GenesisStateSource::IncludedBytes => { + self.get_genesis_state_from_bytes::() + .and_then(|mut state| { + Ok(Some( + state + .canonical_root() + .map_err(|e| format!("Hashing error: {e:?}"))?, + )) + }) + } + } + } + /// Construct a consolidated `ChainSpec` from the YAML config. pub fn chain_spec(&self) -> Result { ChainSpec::from_config::(&self.config).ok_or_else(|| { @@ -185,6 +211,7 @@ impl Eth2NetworkConfig { urls: built_in_urls, checksum, genesis_validators_root, + .. 
} => { let checksum = Hash256::from_str(checksum).map_err(|e| { format!("Unable to parse genesis state bytes checksum: {:?}", e) @@ -507,6 +534,7 @@ mod tests { urls, checksum, genesis_validators_root, + .. } = net.genesis_state_source { Hash256::from_str(checksum).expect("the checksum must be a valid 32-byte value"); diff --git a/common/eth2_wallet_manager/Cargo.toml b/common/eth2_wallet_manager/Cargo.toml index f471757065..a6eb24c78c 100644 --- a/common/eth2_wallet_manager/Cargo.toml +++ b/common/eth2_wallet_manager/Cargo.toml @@ -3,7 +3,6 @@ name = "eth2_wallet_manager" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/eth2_wallet_manager/src/wallet_manager.rs b/common/eth2_wallet_manager/src/wallet_manager.rs index 3dd419a48b..c988ca4135 100644 --- a/common/eth2_wallet_manager/src/wallet_manager.rs +++ b/common/eth2_wallet_manager/src/wallet_manager.rs @@ -296,10 +296,10 @@ mod tests { ) .expect("should create first wallet"); - let uuid = w.wallet().uuid().clone(); + let uuid = *w.wallet().uuid(); assert_eq!( - load_wallet_raw(&base_dir, &uuid).nextaccount(), + load_wallet_raw(base_dir, &uuid).nextaccount(), 0, "should start wallet with nextaccount 0" ); @@ -308,7 +308,7 @@ mod tests { w.next_validator(WALLET_PASSWORD, &[50; 32], &[51; 32]) .expect("should create validator"); assert_eq!( - load_wallet_raw(&base_dir, &uuid).nextaccount(), + load_wallet_raw(base_dir, &uuid).nextaccount(), i, "should update wallet with nextaccount {}", i @@ -333,54 +333,54 @@ mod tests { let base_dir = dir.path(); let mgr = WalletManager::open(base_dir).unwrap(); - let uuid_a = create_wallet(&mgr, 0).wallet().uuid().clone(); - let uuid_b = create_wallet(&mgr, 1).wallet().uuid().clone(); + let uuid_a = *create_wallet(&mgr, 0).wallet().uuid(); + let uuid_b = *create_wallet(&mgr, 1).wallet().uuid(); - let locked_a = 
LockedWallet::open(&base_dir, &uuid_a).expect("should open wallet a"); + let locked_a = LockedWallet::open(base_dir, &uuid_a).expect("should open wallet a"); assert!( - lockfile_path(&base_dir, &uuid_a).exists(), + lockfile_path(base_dir, &uuid_a).exists(), "lockfile should exist" ); drop(locked_a); assert!( - !lockfile_path(&base_dir, &uuid_a).exists(), + !lockfile_path(base_dir, &uuid_a).exists(), "lockfile have been cleaned up" ); - let locked_a = LockedWallet::open(&base_dir, &uuid_a).expect("should open wallet a"); - let locked_b = LockedWallet::open(&base_dir, &uuid_b).expect("should open wallet b"); + let locked_a = LockedWallet::open(base_dir, &uuid_a).expect("should open wallet a"); + let locked_b = LockedWallet::open(base_dir, &uuid_b).expect("should open wallet b"); assert!( - lockfile_path(&base_dir, &uuid_a).exists(), + lockfile_path(base_dir, &uuid_a).exists(), "lockfile a should exist" ); assert!( - lockfile_path(&base_dir, &uuid_b).exists(), + lockfile_path(base_dir, &uuid_b).exists(), "lockfile b should exist" ); - match LockedWallet::open(&base_dir, &uuid_a) { + match LockedWallet::open(base_dir, &uuid_a) { Err(Error::LockfileError(_)) => {} _ => panic!("did not get locked error"), }; drop(locked_a); - LockedWallet::open(&base_dir, &uuid_a) + LockedWallet::open(base_dir, &uuid_a) .expect("should open wallet a after previous instance is dropped"); - match LockedWallet::open(&base_dir, &uuid_b) { + match LockedWallet::open(base_dir, &uuid_b) { Err(Error::LockfileError(_)) => {} _ => panic!("did not get locked error"), }; drop(locked_b); - LockedWallet::open(&base_dir, &uuid_b) + LockedWallet::open(base_dir, &uuid_b) .expect("should open wallet a after previous instance is dropped"); } } diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml deleted file mode 100644 index fe966f4a9c..0000000000 --- a/common/lighthouse_metrics/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "lighthouse_metrics" -version = 
"0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -prometheus = "0.13.0" diff --git a/common/lighthouse_version/Cargo.toml b/common/lighthouse_version/Cargo.toml index 3c4f9fe50c..164e3e47a7 100644 --- a/common/lighthouse_version/Cargo.toml +++ b/common/lighthouse_version/Cargo.toml @@ -3,7 +3,6 @@ name = "lighthouse_version" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index f988dd86b1..0751bdadff 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v5.3.0-", - fallback = "Lighthouse/v5.3.0" + prefix = "Lighthouse/v6.0.1-", + fallback = "Lighthouse/v6.0.1" ); /// Returns the first eight characters of the latest commit hash for this build. 
diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index cac6d073f2..b2829a48d8 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -9,7 +9,7 @@ test_logger = [] # Print log output to stderr when running tests instead of drop [dependencies] chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } @@ -19,7 +19,7 @@ sloggers = { workspace = true } take_mut = "0.2.2" tokio = { workspace = true, features = [ "time" ] } tracing = "0.1" +tracing-appender = { workspace = true } tracing-core = { workspace = true } tracing-log = { workspace = true } tracing-subscriber = { workspace = true } -tracing-appender = { workspace = true } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 0df03c17d0..7fe7f79506 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,6 +1,4 @@ -use lighthouse_metrics::{ - inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult, -}; +use metrics::{inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult}; use slog::Logger; use slog_term::Decorator; use std::io::{Result, Write}; @@ -107,7 +105,7 @@ impl<'a> AlignedRecordDecorator<'a> { } } -impl<'a> Write for AlignedRecordDecorator<'a> { +impl Write for AlignedRecordDecorator<'_> { fn write(&mut self, buf: &[u8]) -> Result { if buf.iter().any(u8::is_ascii_control) { let filtered = buf @@ -126,7 +124,7 @@ impl<'a> Write for AlignedRecordDecorator<'a> { } } -impl<'a> slog_term::RecordDecorator for AlignedRecordDecorator<'a> { +impl slog_term::RecordDecorator for AlignedRecordDecorator<'_> { fn reset(&mut self) -> Result<()> { self.message_active = false; self.message_count = 0; diff --git a/common/logging/src/tracing_metrics_layer.rs b/common/logging/src/tracing_metrics_layer.rs index 
89a1f4d1f1..5d272adbf5 100644 --- a/common/logging/src/tracing_metrics_layer.rs +++ b/common/logging/src/tracing_metrics_layer.rs @@ -1,6 +1,5 @@ //! Exposes [`MetricsLayer`]: A tracing layer that registers metrics of logging events. -use lighthouse_metrics as metrics; use std::sync::LazyLock; use tracing_log::NormalizeEvent; diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index b91e68c518..64fb7b9aad 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -lighthouse_metrics = { workspace = true } libc = "0.2.79" +metrics = { workspace = true } parking_lot = { workspace = true } tikv-jemalloc-ctl = { version = "0.6.0", optional = true, features = ["stats"] } diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 41d8d28291..30313d0672 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -4,7 +4,7 @@ //! https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html //! //! These functions are generally only suitable for Linux systems. -use lighthouse_metrics::*; +use metrics::*; use parking_lot::Mutex; use std::env; use std::os::raw::c_int; @@ -38,60 +38,57 @@ pub static GLOBAL_LOCK: LazyLock> = LazyLock::new(|| <_>::default()); // Metrics for the malloc. For more information, see: // // https://man7.org/linux/man-pages/man3/mallinfo.3.html -pub static MALLINFO_ARENA: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_ARENA: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_arena", "The total amount of memory allocated by means other than mmap(2). 
\ This figure includes both in-use blocks and blocks on the free list.", ) }); -pub static MALLINFO_ORDBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_ORDBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_ordblks", "The number of ordinary (i.e., non-fastbin) free blocks.", ) }); -pub static MALLINFO_SMBLKS: LazyLock> = +pub static MALLINFO_SMBLKS: LazyLock> = LazyLock::new(|| try_create_int_gauge("mallinfo_smblks", "The number of fastbin free blocks.")); -pub static MALLINFO_HBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_HBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_hblks", "The number of blocks currently allocated using mmap.", ) }); -pub static MALLINFO_HBLKHD: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_HBLKHD: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_hblkhd", "The number of bytes in blocks currently allocated using mmap.", ) }); -pub static MALLINFO_FSMBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_FSMBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_fsmblks", "The total number of bytes in fastbin free blocks.", ) }); -pub static MALLINFO_UORDBLKS: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_uordblks", - "The total number of bytes used by in-use allocations.", - ) - }); -pub static MALLINFO_FORDBLKS: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_fordblks", - "The total number of bytes in free blocks.", - ) - }); -pub static MALLINFO_KEEPCOST: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_keepcost", - "The total amount of releasable free space at the top of the heap..", - ) - }); +pub static MALLINFO_UORDBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_uordblks", + "The total number of bytes used by in-use allocations.", + ) +}); +pub static MALLINFO_FORDBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_fordblks", + "The 
total number of bytes in free blocks.", + ) +}); +pub static MALLINFO_KEEPCOST: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_keepcost", + "The total amount of releasable free space at the top of the heap..", + ) +}); /// Calls `mallinfo` and updates Prometheus metrics with the results. pub fn scrape_mallinfo_metrics() { diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index a392a74e8f..0e2e00cb0e 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -7,7 +7,7 @@ //! //! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. //! B) `_RJEM_MALLOC_CONF` at runtime. -use lighthouse_metrics::{set_gauge, try_create_int_gauge, IntGauge}; +use metrics::{set_gauge, try_create_int_gauge, IntGauge}; use std::sync::LazyLock; use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; @@ -15,22 +15,22 @@ use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; // Metrics for jemalloc. 
-pub static NUM_ARENAS: LazyLock> = +pub static NUM_ARENAS: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_num_arenas", "The number of arenas in use")); -pub static BYTES_ALLOCATED: LazyLock> = LazyLock::new(|| { +pub static BYTES_ALLOCATED: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_allocated", "Equivalent to stats.allocated") }); -pub static BYTES_ACTIVE: LazyLock> = +pub static BYTES_ACTIVE: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_active", "Equivalent to stats.active")); -pub static BYTES_MAPPED: LazyLock> = +pub static BYTES_MAPPED: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_mapped", "Equivalent to stats.mapped")); -pub static BYTES_METADATA: LazyLock> = LazyLock::new(|| { +pub static BYTES_METADATA: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_metadata", "Equivalent to stats.metadata") }); -pub static BYTES_RESIDENT: LazyLock> = LazyLock::new(|| { +pub static BYTES_RESIDENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_resident", "Equivalent to stats.resident") }); -pub static BYTES_RETAINED: LazyLock> = LazyLock::new(|| { +pub static BYTES_RETAINED: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_retained", "Equivalent to stats.retained") }); diff --git a/common/metrics/Cargo.toml b/common/metrics/Cargo.toml new file mode 100644 index 0000000000..a7f4f4b967 --- /dev/null +++ b/common/metrics/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "metrics" +version = "0.2.0" +edition = { workspace = true } + +[dependencies] +prometheus = { workspace = true } diff --git a/common/lighthouse_metrics/src/lib.rs b/common/metrics/src/lib.rs similarity index 97% rename from common/lighthouse_metrics/src/lib.rs rename to common/metrics/src/lib.rs index 2a1e99defa..22513af8bc 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/metrics/src/lib.rs @@ -20,10 +20,10 @@ //! ## Example //! //! ```rust -//! 
use lighthouse_metrics::*; +//! use metrics::*; //! use std::sync::LazyLock; //! -//! // These metrics are "magically" linked to the global registry defined in `lighthouse_metrics`. +//! // These metrics are "magically" linked to the global registry defined in `metrics`. //! pub static RUN_COUNT: LazyLock> = LazyLock::new(|| try_create_int_counter( //! "runs_total", //! "Total number of runs" @@ -283,6 +283,14 @@ pub fn stop_timer(timer: Option) { } } +/// Stops a timer created with `start_timer(..)`. +/// +/// Return the duration that the timer was running for, or 0.0 if it was `None` due to incorrect +/// initialisation. +pub fn stop_timer_with_duration(timer: Option) -> Duration { + Duration::from_secs_f64(timer.map_or(0.0, |t| t.stop_and_record())) +} + pub fn observe_vec(vec: &Result, name: &[&str], value: f64) { if let Some(h) = get_histogram(vec, name) { h.observe(value) diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 55f18edd52..5008c86e85 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -3,19 +3,18 @@ name = "monitoring_api" version = "0.1.0" authors = ["pawan "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -reqwest = { workspace = true } -task_executor = { workspace = true } -tokio = { workspace = true } eth2 = { workspace = true } -serde_json = { workspace = true } -serde = { workspace = true } lighthouse_version = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } +regex = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } slog = { workspace = true } store = { workspace = true } -regex = { workspace = true } -sensitive_url = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } diff --git 
a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index e157d82c11..2f6c820f56 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -1,5 +1,5 @@ use super::types::{BeaconProcessMetrics, ValidatorProcessMetrics}; -use lighthouse_metrics::{MetricFamily, MetricType}; +use metrics::{MetricFamily, MetricType}; use serde_json::json; use std::collections::HashMap; use std::path::Path; @@ -155,7 +155,7 @@ fn get_value(mf: &MetricFamily) -> Option { /// Collects all metrics and returns a `serde_json::Value` object with the required metrics /// from the metrics hashmap. pub fn gather_metrics(metrics_map: &HashMap) -> Option { - let metric_families = lighthouse_metrics::gather(); + let metric_families = metrics::gather(); let mut res = serde_json::Map::with_capacity(metrics_map.len()); for mf in metric_families.iter() { let metric_name = mf.get_name(); diff --git a/common/oneshot_broadcast/Cargo.toml b/common/oneshot_broadcast/Cargo.toml index 12c9b40bc8..8a358ef851 100644 --- a/common/oneshot_broadcast/Cargo.toml +++ b/common/oneshot_broadcast/Cargo.toml @@ -2,7 +2,6 @@ name = "oneshot_broadcast" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/pretty_reqwest_error/Cargo.toml b/common/pretty_reqwest_error/Cargo.toml index dc79832cd3..4311601bcd 100644 --- a/common/pretty_reqwest_error/Cargo.toml +++ b/common/pretty_reqwest_error/Cargo.toml @@ -2,7 +2,6 @@ name = "pretty_reqwest_error" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/sensitive_url/Cargo.toml b/common/sensitive_url/Cargo.toml index d218c8d93a..ff56209722 100644 --- a/common/sensitive_url/Cargo.toml +++ b/common/sensitive_url/Cargo.toml @@ -3,9 +3,8 @@ name = "sensitive_url" version 
= "0.1.0" authors = ["Mac L "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -url = { workspace = true } serde = { workspace = true } +url = { workspace = true } diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index 13bcf006a9..2e1982efb1 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -5,6 +5,6 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -types = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } +types = { workspace = true } diff --git a/common/slot_clock/src/metrics.rs b/common/slot_clock/src/metrics.rs index 24023c9ed7..ec95e90d4a 100644 --- a/common/slot_clock/src/metrics.rs +++ b/common/slot_clock/src/metrics.rs @@ -1,5 +1,5 @@ use crate::SlotClock; -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; use types::{EthSpec, Slot}; diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index be339f2779..034683f72e 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -5,7 +5,7 @@ edition = { workspace = true } [dependencies] lighthouse_network = { workspace = true } -types = { workspace = true } -sysinfo = { workspace = true } -serde = { workspace = true } parking_lot = { workspace = true } +serde = { workspace = true } +sysinfo = { workspace = true } +types = { workspace = true } diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs index feec08af84..9f351e943b 100644 --- a/common/system_health/src/lib.rs +++ b/common/system_health/src/lib.rs @@ -198,23 +198,61 @@ pub fn observe_system_health_vc( } } +/// The current state of Lighthouse NAT/connectivity. +#[derive(Serialize, Deserialize)] +pub struct NatState { + /// Contactable on discovery ipv4. 
+ discv5_ipv4: bool, + /// Contactable on discovery ipv6. + discv5_ipv6: bool, + /// Contactable on libp2p ipv4. + libp2p_ipv4: bool, + /// Contactable on libp2p ipv6. + libp2p_ipv6: bool, +} + +impl NatState { + pub fn is_anything_open(&self) -> bool { + self.discv5_ipv4 || self.discv5_ipv6 || self.libp2p_ipv4 || self.libp2p_ipv6 + } +} + /// Observes if NAT traversal is possible. -pub fn observe_nat() -> bool { - let discv5_nat = lighthouse_network::metrics::get_int_gauge( +pub fn observe_nat() -> NatState { + let discv5_ipv4 = lighthouse_network::metrics::get_int_gauge( &lighthouse_network::metrics::NAT_OPEN, - &["discv5"], + &["discv5_ipv4"], ) .map(|g| g.get() == 1) .unwrap_or_default(); - let libp2p_nat = lighthouse_network::metrics::get_int_gauge( + let discv5_ipv6 = lighthouse_network::metrics::get_int_gauge( &lighthouse_network::metrics::NAT_OPEN, - &["libp2p"], + &["discv5_ipv6"], ) .map(|g| g.get() == 1) .unwrap_or_default(); - discv5_nat || libp2p_nat + let libp2p_ipv4 = lighthouse_network::metrics::get_int_gauge( + &lighthouse_network::metrics::NAT_OPEN, + &["libp2p_ipv4"], + ) + .map(|g| g.get() == 1) + .unwrap_or_default(); + + let libp2p_ipv6 = lighthouse_network::metrics::get_int_gauge( + &lighthouse_network::metrics::NAT_OPEN, + &["libp2p_ipv6"], + ) + .map(|g| g.get() == 1) + .unwrap_or_default(); + + NatState { + discv5_ipv4, + discv5_ipv6, + libp2p_ipv4, + libp2p_ipv6, + } } /// Observes the Beacon Node system health. @@ -242,7 +280,7 @@ pub fn observe_system_health_bn( .unwrap_or_else(|| (String::from("None"), 0, 0)); // Determine if the NAT is open or not. 
- let nat_open = observe_nat(); + let nat_open = observe_nat().is_anything_open(); SystemHealthBN { system_health, diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 7928d4a3c9..c1ac4b55a9 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -4,11 +4,17 @@ version = "0.1.0" authors = ["Sigma Prime "] edition = { workspace = true } +[features] +default = ["slog"] +slog = ["dep:slog", "dep:sloggers", "dep:logging"] +tracing = ["dep:tracing"] + [dependencies] async-channel = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } futures = { workspace = true } -lighthouse_metrics = { workspace = true } -sloggers = { workspace = true } -logging = { workspace = true } +logging = { workspace = true, optional = true } +metrics = { workspace = true } +slog = { workspace = true, optional = true } +sloggers = { workspace = true, optional = true } +tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } +tracing = { workspace = true, optional = true } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index d6edfd3121..92ddb7c0be 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -1,14 +1,20 @@ mod metrics; +#[cfg(not(feature = "tracing"))] pub mod test_utils; use futures::channel::mpsc::Sender; use futures::prelude::*; -use slog::{debug, o, trace}; use std::sync::Weak; use tokio::runtime::{Handle, Runtime}; pub use tokio::task::JoinHandle; +// Set up logging framework +#[cfg(not(feature = "tracing"))] +use slog::{debug, o}; +#[cfg(feature = "tracing")] +use tracing::debug; + /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { @@ -79,7 +85,7 @@ pub struct TaskExecutor { /// /// The task must provide a reason for shutting down. 
signal_tx: Sender, - + #[cfg(not(feature = "tracing"))] log: slog::Logger, } @@ -94,18 +100,20 @@ impl TaskExecutor { pub fn new>( handle: T, exit: async_channel::Receiver<()>, - log: slog::Logger, + #[cfg(not(feature = "tracing"))] log: slog::Logger, signal_tx: Sender, ) -> Self { Self { handle_provider: handle.into(), exit, signal_tx, + #[cfg(not(feature = "tracing"))] log, } } /// Clones the task executor adding a service name. + #[cfg(not(feature = "tracing"))] pub fn clone_with_name(&self, service_name: String) -> Self { TaskExecutor { handle_provider: self.handle_provider.clone(), @@ -115,6 +123,16 @@ impl TaskExecutor { } } + /// Clones the task executor adding a service name. + #[cfg(feature = "tracing")] + pub fn clone(&self) -> Self { + TaskExecutor { + handle_provider: self.handle_provider.clone(), + exit: self.exit.clone(), + signal_tx: self.signal_tx.clone(), + } + } + /// A convenience wrapper for `Self::spawn` which ignores a `Result` as long as both `Ok`/`Err` /// are of type `()`. /// @@ -150,10 +168,13 @@ impl TaskExecutor { drop(timer); }); } else { + #[cfg(not(feature = "tracing"))] debug!( self.log, "Couldn't spawn monitor task. Runtime shutting down" - ) + ); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn monitor task. Runtime shutting down"); } } @@ -175,7 +196,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime. This function does not wrap the task in an `async-channel::Receiver` /// like [spawn](#method.spawn). /// The caller of this function is responsible for wrapping up the task with an `async-channel::Receiver` to - /// ensure that the task gets canceled appropriately. + /// ensure that the task gets cancelled appropriately. /// This function generates prometheus metrics on number of tasks and task duration. 
/// /// This is useful in cases where the future to be spawned needs to do additional cleanup work when @@ -197,7 +218,10 @@ impl TaskExecutor { if let Some(handle) = self.handle() { handle.spawn(future); } else { + #[cfg(not(feature = "tracing"))] debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. Runtime shutting down"); } } } @@ -215,7 +239,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime wrapped in an `async-channel::Receiver` returning an optional /// join handle to the future. - /// The task is canceled when the corresponding async-channel is dropped. + /// The task is cancelled when the corresponding async-channel is dropped. /// /// This function generates prometheus metrics on number of tasks and task duration. pub fn spawn_handle( @@ -224,6 +248,8 @@ impl TaskExecutor { name: &'static str, ) -> Option>> { let exit = self.exit(); + + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { @@ -234,12 +260,12 @@ impl TaskExecutor { Some(handle.spawn(async move { futures::pin_mut!(exit); let result = match future::select(Box::pin(task), exit).await { - future::Either::Left((value, _)) => { - trace!(log, "Async task completed"; "task" => name); - Some(value) - } + future::Either::Left((value, _)) => Some(value), future::Either::Right(_) => { + #[cfg(not(feature = "tracing"))] debug!(log, "Async task shutdown, exit received"; "task" => name); + #[cfg(feature = "tracing")] + debug!(task = name, "Async task shutdown, exit received"); None } }; @@ -247,7 +273,10 @@ impl TaskExecutor { result })) } else { - debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(not(feature = "tracing"))] + debug!(log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. 
Runtime shutting down"); None } } else { @@ -270,6 +299,7 @@ impl TaskExecutor { F: FnOnce() -> R + Send + 'static, R: Send + 'static, { + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]); @@ -278,19 +308,22 @@ impl TaskExecutor { let join_handle = if let Some(handle) = self.handle() { handle.spawn_blocking(task) } else { + #[cfg(not(feature = "tracing"))] debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. Runtime shutting down"); return None; }; let future = async move { let result = match join_handle.await { - Ok(result) => { - trace!(log, "Blocking task completed"; "task" => name); - Ok(result) - } - Err(e) => { - debug!(log, "Blocking task ended unexpectedly"; "error" => %e); - Err(e) + Ok(result) => Ok(result), + Err(error) => { + #[cfg(not(feature = "tracing"))] + debug!(log, "Blocking task ended unexpectedly"; "error" => %error); + #[cfg(feature = "tracing")] + debug!(%error, "Blocking task ended unexpectedly"); + Err(error) } }; drop(timer); @@ -321,32 +354,48 @@ impl TaskExecutor { ) -> Option { let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]); metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); let handle = self.handle()?; let exit = self.exit(); - + #[cfg(not(feature = "tracing"))] debug!( log, "Starting block_on task"; "name" => name ); + #[cfg(feature = "tracing")] + debug!(name, "Starting block_on task"); + handle.block_on(async { let output = tokio::select! 
{ output = future => { + #[cfg(not(feature = "tracing"))] debug!( log, "Completed block_on task"; "name" => name ); + #[cfg(feature = "tracing")] + debug!( + name, + "Completed block_on task" + ); Some(output) }, _ = exit => { + #[cfg(not(feature = "tracing"))] debug!( log, "Cancelled block_on task"; "name" => name, ); + #[cfg(feature = "tracing")] + debug!( + name, + "Cancelled block_on task" + ); None } }; @@ -376,6 +425,7 @@ impl TaskExecutor { } /// Returns a reference to the logger. + #[cfg(not(feature = "tracing"))] pub fn log(&self) -> &slog::Logger { &self.log } diff --git a/common/task_executor/src/metrics.rs b/common/task_executor/src/metrics.rs index a40bfdf4e7..bd4d6a50b9 100644 --- a/common/task_executor/src/metrics.rs +++ b/common/task_executor/src/metrics.rs @@ -1,5 +1,5 @@ /// Handles async task metrics -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static ASYNC_TASKS_COUNT: LazyLock> = LazyLock::new(|| { diff --git a/common/test_random_derive/Cargo.toml b/common/test_random_derive/Cargo.toml index 79308797a4..b38d5ef63a 100644 --- a/common/test_random_derive/Cargo.toml +++ b/common/test_random_derive/Cargo.toml @@ -9,5 +9,5 @@ description = "Procedural derive macros for implementation of TestRandom trait" proc-macro = true [dependencies] -syn = { workspace = true } quote = { workspace = true } +syn = { workspace = true } diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 95dbf59186..2d771cd600 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -2,7 +2,6 @@ name = "unused_port" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index ae8742fe07..773431c93c 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -6,21 +6,20 @@ edition = { 
workspace = true } [features] insecure_keys = [] - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] bls = { workspace = true } +deposit_contract = { workspace = true } +derivative = { workspace = true } +directory = { workspace = true } eth2_keystore = { workspace = true } filesystem = { workspace = true } -types = { workspace = true } -rand = { workspace = true } -deposit_contract = { workspace = true } -tree_hash = { workspace = true } hex = { workspace = true } -derivative = { workspace = true } lockfile = { workspace = true } -directory = { workspace = true } +rand = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/common/validator_dir/src/insecure_keys.rs b/common/validator_dir/src/insecure_keys.rs index f8cc51da63..83720bb58c 100644 --- a/common/validator_dir/src/insecure_keys.rs +++ b/common/validator_dir/src/insecure_keys.rs @@ -15,7 +15,7 @@ use types::test_utils::generate_deterministic_keypair; /// A very weak password with which to encrypt the keystores. pub const INSECURE_PASSWORD: &[u8] = &[50; 51]; -impl<'a> Builder<'a> { +impl Builder<'_> { /// Generate the voting keystore using a deterministic, well-known, **unsafe** keypair. /// /// **NEVER** use these keys in production! 
diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 84f5ce5f18..4a3cde54a9 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -3,20 +3,19 @@ name = "warp_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = { workspace = true } -eth2 = { workspace = true } -types = { workspace = true } beacon_chain = { workspace = true } -state_processing = { workspace = true } +bytes = { workspace = true } +eth2 = { workspace = true } +headers = "0.3.2" +metrics = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } -tokio = { workspace = true } -headers = "0.3.2" -lighthouse_metrics = { workspace = true } serde_array_query = "0.1.0" -bytes = { workspace = true } +serde_json = { workspace = true } +state_processing = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +warp = { workspace = true } diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index 505d277583..fabcf93650 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -1,5 +1,5 @@ use eth2::lighthouse::{ProcessHealth, SystemHealth}; -use lighthouse_metrics::*; +use metrics::*; use std::sync::LazyLock; pub static PROCESS_NUM_THREADS: LazyLock> = LazyLock::new(|| { diff --git a/consensus/fixed_bytes/Cargo.toml b/consensus/fixed_bytes/Cargo.toml index e5201a0455..ab29adfb1b 100644 --- a/consensus/fixed_bytes/Cargo.toml +++ b/consensus/fixed_bytes/Cargo.toml @@ -3,7 +3,6 @@ name = "fixed_bytes" version = "0.1.0" authors = ["Eitan Seri-Levi "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml 
index 4a4f6e9086..3bd18e922a 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -3,17 +3,16 @@ name = "fork_choice" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -types = { workspace = true } -state_processing = { workspace = true } -proto_array = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } +proto_array = { workspace = true } slog = { workspace = true } +state_processing = { workspace = true } +types = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } diff --git a/consensus/fork_choice/src/metrics.rs b/consensus/fork_choice/src/metrics.rs index eb0dbf435e..b5cda2f587 100644 --- a/consensus/fork_choice/src/metrics.rs +++ b/consensus/fork_choice/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; use types::EthSpec; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 29265e34e4..ef017159a0 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1156,18 +1156,20 @@ async fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { }; // recreate the chain exactly - ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) - .await - .unwrap() - .skip_slots(E::slots_per_epoch() as usize) - .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) - .await - .unwrap() - .apply_blocks(1) - .await - .assert_finalized_epoch(5) - .assert_shutdown_signal_not_sent(); + Box::pin( + ForkChoiceTest::new_with_chain_config(chain_config.clone()) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await 
+ .unwrap() + .skip_slots(E::slots_per_epoch() as usize) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await + .unwrap() + .apply_blocks(1), + ) + .await + .assert_finalized_epoch(5) + .assert_shutdown_signal_not_sent(); } #[tokio::test] diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index e99d1af8e5..c639dfce8d 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } bytes = { workspace = true } [dev-dependencies] -yaml-rust2 = "0.8" hex = { workspace = true } +yaml-rust2 = "0.8" diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 99f98cf545..bd6757c0fa 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -9,10 +9,10 @@ name = "proto_array" path = "src/bin.rs" [dependencies] -types = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +safe_arith = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } -safe_arith = { workspace = true } superstruct = { workspace = true } +types = { workspace = true } diff --git a/consensus/safe_arith/Cargo.toml b/consensus/safe_arith/Cargo.toml index 6f2e4b811c..9ac9fe28d3 100644 --- a/consensus/safe_arith/Cargo.toml +++ b/consensus/safe_arith/Cargo.toml @@ -3,7 +3,6 @@ name = "safe_arith" version = "0.1.0" authors = ["Michael Sproul "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 7b7c6eb0c4..502ffe3cf6 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -5,30 +5,30 @@ authors = ["Paul Hauner ", "Michael Sproul BlockReplayer<'a, E, Error, StateRootIterDefault> +impl BlockReplayer<'_, E, Error, 
StateRootIterDefault> where E: EthSpec, Error: From, diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index b131f7679a..842adce431 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -103,14 +103,14 @@ pub mod attesting_indices_electra { let committee_count_per_slot = committees.len() as u64; let mut participant_count = 0; - for index in committee_indices { + for committee_index in committee_indices { let beacon_committee = committees - .get(index as usize) - .ok_or(Error::NoCommitteeFound(index))?; + .get(committee_index as usize) + .ok_or(Error::NoCommitteeFound(committee_index))?; // This check is new to the spec's `process_attestation` in Electra. - if index >= committee_count_per_slot { - return Err(BeaconStateError::InvalidCommitteeIndex(index)); + if committee_index >= committee_count_per_slot { + return Err(BeaconStateError::InvalidCommitteeIndex(committee_index)); } participant_count.safe_add_assign(beacon_committee.committee.len() as u64)?; let committee_attesters = beacon_committee @@ -127,6 +127,12 @@ pub mod attesting_indices_electra { }) .collect::>(); + // Require at least a single non-zero bit for each attesting committee bitfield. + // This check is new to the spec's `process_attestation` in Electra. 
+ if committee_attesters.is_empty() { + return Err(BeaconStateError::EmptyCommittee); + } + attesting_indices.extend(committee_attesters); committee_offset.safe_add_assign(beacon_committee.committee.len())?; } diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs index af843b3acb..1fdfe802c4 100644 --- a/consensus/state_processing/src/common/update_progressive_balances_cache.rs +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -1,10 +1,10 @@ /// A collection of all functions that mutates the `ProgressiveBalancesCache`. use crate::metrics::{ - PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + self, PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, }; use crate::{BlockProcessingError, EpochProcessingError}; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use types::{ is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EpochTotalBalances, EthSpec, ParticipationFlags, ProgressiveBalancesCache, Validator, @@ -21,6 +21,8 @@ pub fn initialize_progressive_balances_cache( return Ok(()); } + let _timer = metrics::start_timer(&metrics::BUILD_PROGRESSIVE_BALANCES_CACHE_TIME); + // Calculate the total flag balances for previous & current epoch in a single iteration. // This calculates `get_total_balance(unslashed_participating_indices(..))` for each flag in // the current and previous epoch. 
diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index 5af5e639fd..dc1d79709e 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -1,6 +1,7 @@ use crate::common::altair::BaseRewardPerIncrement; use crate::common::base::SqrtTotalActiveBalance; use crate::common::{altair, base}; +use crate::metrics; use safe_arith::SafeArith; use types::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; use types::{ @@ -138,6 +139,8 @@ pub fn initialize_epoch_cache( return Ok(()); } + let _timer = metrics::start_timer(&metrics::BUILD_EPOCH_CACHE_TIME); + let current_epoch = state.current_epoch(); let next_epoch = state.next_epoch().map_err(EpochCacheError::BeaconState)?; let decision_block_root = state diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index e6fe483776..8772dbd4f8 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* @@ -41,6 +41,20 @@ pub static PROCESS_EPOCH_TIME: LazyLock> = LazyLock::new(|| { "Time required for process_epoch", ) }); +pub static BUILD_EPOCH_CACHE_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_state_processing_epoch_cache", + "Time required to build the epoch cache", + ) +}); +pub static BUILD_PROGRESSIVE_BALANCES_CACHE_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_state_processing_progressive_balances_cache", + "Time required to build the progressive balances cache", + ) + }); + /* * Participation Metrics (progressive balances) */ diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index e85a8b7669..0ef394d517 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ 
b/consensus/state_processing/src/per_block_processing.rs @@ -516,6 +516,7 @@ pub fn get_expected_withdrawals( // Consume pending partial withdrawals let partial_withdrawals_count = if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { + let mut partial_withdrawals_count = 0; for withdrawal in partial_withdrawals { if withdrawal.withdrawable_epoch > epoch || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize @@ -548,8 +549,9 @@ pub fn get_expected_withdrawals( }); withdrawal_index.safe_add_assign(1)?; } + partial_withdrawals_count.safe_add_assign(1)?; } - Some(withdrawals.len()) + Some(partial_withdrawals_count) } else { None }; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index a53dc15126..22d8592364 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -7,7 +7,6 @@ use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; use crate::VerifySignatures; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; use types::typenum::U33; -use types::validator::is_compounding_withdrawal_credential; pub fn process_operations>( state: &mut BeaconState, @@ -378,7 +377,7 @@ pub fn process_deposits( if state.eth1_deposit_index() < eth1_deposit_index_limit { let expected_deposit_len = std::cmp::min( E::MaxDeposits::to_u64(), - state.get_outstanding_deposit_len()?, + eth1_deposit_index_limit.safe_sub(state.eth1_deposit_index())?, ); block_verify!( deposits.len() as u64 == expected_deposit_len, @@ -450,39 +449,46 @@ pub fn apply_deposit( if let Some(index) = validator_index { // [Modified in Electra:EIP7251] - if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { - pending_balance_deposits.push(PendingBalanceDeposit { index, 
amount })?; - - let validator = state - .validators() - .get(index as usize) - .ok_or(BeaconStateError::UnknownValidator(index as usize))?; - - if is_compounding_withdrawal_credential(deposit_data.withdrawal_credentials, spec) - && validator.has_eth1_withdrawal_credential(spec) - && is_valid_deposit_signature(&deposit_data, spec).is_ok() - { - state.switch_to_compounding_validator(index as usize, spec)?; - } + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, + amount, + signature: deposit_data.signature, + slot: spec.genesis_slot, // Use `genesis_slot` to distinguish from a pending deposit request + })?; } else { // Update the existing validator balance. increase_balance(state, index as usize, amount)?; } - } else { + } + // New validator + else { // The signature should be checked for new validators. Return early for a bad // signature. if is_valid_deposit_signature(&deposit_data, spec).is_err() { return Ok(()); } - state.add_validator_to_registry(&deposit_data, spec)?; - let new_validator_index = state.validators().len().safe_sub(1)? 
as u64; + state.add_validator_to_registry( + deposit_data.pubkey, + deposit_data.withdrawal_credentials, + if state.fork_name_unchecked() >= ForkName::Electra { + 0 + } else { + amount + }, + spec, + )?; // [New in Electra:EIP7251] - if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { - pending_balance_deposits.push(PendingBalanceDeposit { - index: new_validator_index, + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, amount, + signature: deposit_data.signature, + slot: spec.genesis_slot, // Use `genesis_slot` to distinguish from a pending deposit request })?; } } @@ -596,13 +602,18 @@ pub fn process_deposit_requests( if state.deposit_requests_start_index()? == spec.unset_deposit_requests_start_index { *state.deposit_requests_start_index_mut()? = request.index } - let deposit_data = DepositData { - pubkey: request.pubkey, - withdrawal_credentials: request.withdrawal_credentials, - amount: request.amount, - signature: request.signature.clone().into(), - }; - apply_deposit(state, deposit_data, None, false, spec)? 
+ let slot = state.slot(); + + // [New in Electra:EIP7251] + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: request.pubkey, + withdrawal_credentials: request.withdrawal_credentials, + amount: request.amount, + signature: request.signature.clone(), + slot, + })?; + } } Ok(()) @@ -621,11 +632,84 @@ pub fn process_consolidation_requests( Ok(()) } +fn is_valid_switch_to_compounding_request( + state: &BeaconState, + consolidation_request: &ConsolidationRequest, + spec: &ChainSpec, +) -> Result { + // Switch to compounding requires source and target be equal + if consolidation_request.source_pubkey != consolidation_request.target_pubkey { + return Ok(false); + } + + // Verify pubkey exists + let Some(source_index) = state + .pubkey_cache() + .get(&consolidation_request.source_pubkey) + else { + // source validator doesn't exist + return Ok(false); + }; + + let source_validator = state.get_validator(source_index)?; + // Verify the source withdrawal credentials + // Note: We need to specifically check for eth1 withdrawal credentials here + // If the validator is already compounding, the compounding request is not valid. + if let Some(withdrawal_address) = source_validator + .has_eth1_withdrawal_credential(spec) + .then(|| { + source_validator + .withdrawal_credentials + .as_slice() + .get(12..) 
+ .map(Address::from_slice) + }) + .flatten() + { + if withdrawal_address != consolidation_request.source_address { + return Ok(false); + } + } else { + // Source doesn't have eth1 withdrawal credentials + return Ok(false); + } + + // Verify the source is active + let current_epoch = state.current_epoch(); + if !source_validator.is_active_at(current_epoch) { + return Ok(false); + } + // Verify exits for source has not been initiated + if source_validator.exit_epoch != spec.far_future_epoch { + return Ok(false); + } + + Ok(true) +} + pub fn process_consolidation_request( state: &mut BeaconState, consolidation_request: &ConsolidationRequest, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + if is_valid_switch_to_compounding_request(state, consolidation_request, spec)? { + let Some(source_index) = state + .pubkey_cache() + .get(&consolidation_request.source_pubkey) + else { + // source validator doesn't exist. This is unreachable as `is_valid_switch_to_compounding_request` + // will return false in that case. + return Ok(()); + }; + state.switch_to_compounding_validator(source_index, spec)?; + return Ok(()); + } + + // Verify that source != target, so a consolidation cannot be used as an exit. + if consolidation_request.source_pubkey == consolidation_request.target_pubkey { + return Ok(()); + } + // If the pending consolidations queue is full, consolidation requests are ignored if state.pending_consolidations()?.len() == E::PendingConsolidationsLimit::to_usize() { return Ok(()); @@ -649,10 +733,6 @@ pub fn process_consolidation_request( // target validator doesn't exist return Ok(()); }; - // Verify that source != target, so a consolidation cannot be used as an exit. 
- if source_index == target_index { - return Ok(()); - } let source_validator = state.get_validator(source_index)?; // Verify the source withdrawal credentials @@ -699,5 +779,10 @@ pub fn process_consolidation_request( target_index: target_index as u64, })?; + let target_validator = state.get_validator(target_index)?; + // Churn any target excess active balance of target and raise its max + if target_validator.has_eth1_withdrawal_credential(spec) { + state.switch_to_compounding_validator(target_index, spec)?; + } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index b6c9dbea52..f45c55a7ac 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -28,6 +28,7 @@ pub enum EpochProcessingError { SinglePassMissingActivationQueue, MissingEarliestExitEpoch, MissingExitBalanceToConsume, + PendingDepositsLogicError, } impl From for EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index fcb480a37c..904e68e368 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -4,6 +4,7 @@ use crate::{ update_progressive_balances_cache::initialize_progressive_balances_cache, }, epoch_cache::{initialize_epoch_cache, PreEpochCache}, + per_block_processing::is_valid_deposit_signature, per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; use itertools::izip; @@ -16,9 +17,9 @@ use types::{ TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, milhouse::Cow, - ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, - ExitCache, ForkName, List, ParticipationFlags, PendingBalanceDeposit, ProgressiveBalancesCache, - RelativeEpoch, Unsigned, Validator, + 
ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, + EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, + ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, }; pub struct SinglePassConfig { @@ -26,7 +27,7 @@ pub struct SinglePassConfig { pub rewards_and_penalties: bool, pub registry_updates: bool, pub slashings: bool, - pub pending_balance_deposits: bool, + pub pending_deposits: bool, pub pending_consolidations: bool, pub effective_balance_updates: bool, } @@ -44,7 +45,7 @@ impl SinglePassConfig { rewards_and_penalties: true, registry_updates: true, slashings: true, - pending_balance_deposits: true, + pending_deposits: true, pending_consolidations: true, effective_balance_updates: true, } @@ -56,7 +57,7 @@ impl SinglePassConfig { rewards_and_penalties: false, registry_updates: false, slashings: false, - pending_balance_deposits: false, + pending_deposits: false, pending_consolidations: false, effective_balance_updates: false, } @@ -85,15 +86,17 @@ struct SlashingsContext { penalty_per_effective_balance_increment: u64, } -struct PendingBalanceDepositsContext { +struct PendingDepositsContext { /// The value to set `next_deposit_index` to *after* processing completes. next_deposit_index: usize, /// The value to set `deposit_balance_to_consume` to *after* processing completes. deposit_balance_to_consume: u64, /// Total balance increases for each validator due to pending balance deposits. validator_deposits_to_process: HashMap, - /// The deposits to append to `pending_balance_deposits` after processing all applicable deposits. - deposits_to_postpone: Vec, + /// The deposits to append to `pending_deposits` after processing all applicable deposits. + deposits_to_postpone: Vec, + /// New validators to be added to the state *after* processing completes. 
+ new_validator_deposits: Vec, } struct EffectiveBalancesContext { @@ -138,6 +141,7 @@ pub fn process_epoch_single_pass( state.build_exit_cache(spec)?; state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.update_pubkey_cache()?; let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); @@ -163,12 +167,11 @@ pub fn process_epoch_single_pass( let slashings_ctxt = &SlashingsContext::new(state, state_ctxt, spec)?; let mut next_epoch_cache = PreEpochCache::new_for_next_epoch(state)?; - let pending_balance_deposits_ctxt = - if fork_name.electra_enabled() && conf.pending_balance_deposits { - Some(PendingBalanceDepositsContext::new(state, spec)?) - } else { - None - }; + let pending_deposits_ctxt = if fork_name.electra_enabled() && conf.pending_deposits { + Some(PendingDepositsContext::new(state, spec, &conf)?) + } else { + None + }; let mut earliest_exit_epoch = state.earliest_exit_epoch().ok(); let mut exit_balance_to_consume = state.exit_balance_to_consume().ok(); @@ -303,9 +306,9 @@ pub fn process_epoch_single_pass( process_single_slashing(&mut balance, &validator, slashings_ctxt, state_ctxt, spec)?; } - // `process_pending_balance_deposits` - if let Some(pending_balance_deposits_ctxt) = &pending_balance_deposits_ctxt { - process_pending_balance_deposits_for_validator( + // `process_pending_deposits` + if let Some(pending_balance_deposits_ctxt) = &pending_deposits_ctxt { + process_pending_deposits_for_validator( &mut balance, validator_info, pending_balance_deposits_ctxt, @@ -342,20 +345,84 @@ pub fn process_epoch_single_pass( // Finish processing pending balance deposits if relevant. // // This *could* be reordered after `process_pending_consolidations` which pushes only to the end - // of the `pending_balance_deposits` list. But we may as well preserve the write ordering used + // of the `pending_deposits` list. 
But we may as well preserve the write ordering used // by the spec and do this first. - if let Some(ctxt) = pending_balance_deposits_ctxt { - let mut new_pending_balance_deposits = List::try_from_iter( + if let Some(ctxt) = pending_deposits_ctxt { + let mut new_balance_deposits = List::try_from_iter( state - .pending_balance_deposits()? + .pending_deposits()? .iter_from(ctxt.next_deposit_index)? .cloned(), )?; for deposit in ctxt.deposits_to_postpone { - new_pending_balance_deposits.push(deposit)?; + new_balance_deposits.push(deposit)?; } - *state.pending_balance_deposits_mut()? = new_pending_balance_deposits; + *state.pending_deposits_mut()? = new_balance_deposits; *state.deposit_balance_to_consume_mut()? = ctxt.deposit_balance_to_consume; + + // `new_validator_deposits` may contain multiple deposits with the same pubkey where + // the first deposit creates the new validator and the others are topups. + // Each item in the vec is a (pubkey, validator_index) + let mut added_validators = Vec::new(); + for deposit in ctxt.new_validator_deposits { + let deposit_data = DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: deposit.signature, + }; + // Only check the signature if this is the first deposit for the validator, + // following the logic from `apply_pending_deposit` in the spec. + if let Some(validator_index) = state.get_validator_index(&deposit_data.pubkey)? { + state + .get_balance_mut(validator_index)? 
+ .safe_add_assign(deposit_data.amount)?; + } else if is_valid_deposit_signature(&deposit_data, spec).is_ok() { + // Apply the new deposit to the state + let validator_index = state.add_validator_to_registry( + deposit_data.pubkey, + deposit_data.withdrawal_credentials, + deposit_data.amount, + spec, + )?; + added_validators.push((deposit_data.pubkey, validator_index)); + } + } + if conf.effective_balance_updates { + // Re-process effective balance updates for validators affected by top-up of new validators. + let ( + validators, + balances, + _, + current_epoch_participation, + _, + progressive_balances, + _, + _, + ) = state.mutable_validator_fields()?; + for (_, validator_index) in added_validators.iter() { + let balance = *balances + .get(*validator_index) + .ok_or(BeaconStateError::UnknownValidator(*validator_index))?; + let mut validator = validators + .get_cow(*validator_index) + .ok_or(BeaconStateError::UnknownValidator(*validator_index))?; + let validator_current_epoch_participation = *current_epoch_participation + .get(*validator_index) + .ok_or(BeaconStateError::UnknownValidator(*validator_index))?; + process_single_effective_balance_update( + *validator_index, + balance, + &mut validator, + validator_current_epoch_participation, + &mut next_epoch_cache, + progressive_balances, + effective_balances_ctxt, + state_ctxt, + spec, + )?; + } + } } // Process consolidations outside the single-pass loop, as they depend on balances for multiple @@ -819,8 +886,12 @@ fn process_single_slashing( Ok(()) } -impl PendingBalanceDepositsContext { - fn new(state: &BeaconState, spec: &ChainSpec) -> Result { +impl PendingDepositsContext { + fn new( + state: &BeaconState, + spec: &ChainSpec, + config: &SinglePassConfig, + ) -> Result { let available_for_processing = state .deposit_balance_to_consume()? 
.safe_add(state.get_activation_exit_churn_limit(spec)?)?; @@ -830,10 +901,31 @@ impl PendingBalanceDepositsContext { let mut next_deposit_index = 0; let mut validator_deposits_to_process = HashMap::new(); let mut deposits_to_postpone = vec![]; + let mut new_validator_deposits = vec![]; + let mut is_churn_limit_reached = false; + let finalized_slot = state + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()); - let pending_balance_deposits = state.pending_balance_deposits()?; + let pending_deposits = state.pending_deposits()?; - for deposit in pending_balance_deposits.iter() { + for deposit in pending_deposits.iter() { + // Do not process deposit requests if the Eth1 bridge deposits are not yet applied. + if deposit.slot > spec.genesis_slot + && state.eth1_deposit_index() < state.deposit_requests_start_index()? + { + break; + } + // Do not process is deposit slot has not been finalized. + if deposit.slot > finalized_slot { + break; + } + // Do not process if we have reached the limit for the number of deposits + // processed in an epoch. + if next_deposit_index >= E::max_pending_deposits_per_epoch() { + break; + } // We have to do a bit of indexing into `validators` here, but I can't see any way // around that without changing the spec. // @@ -844,48 +936,70 @@ impl PendingBalanceDepositsContext { // take, just whether it is non-default. Nor do we need to know the value of // `withdrawable_epoch`, because `next_epoch <= withdrawable_epoch` will evaluate to // `true` both for the actual value & the default placeholder value (`FAR_FUTURE_EPOCH`). - let validator = state.get_validator(deposit.index as usize)?; - let already_exited = validator.exit_epoch < spec.far_future_epoch; - // In the spec process_registry_updates is called before process_pending_balance_deposits - // so we must account for process_registry_updates ejecting the validator for low balance - // and setting the exit_epoch to < far_future_epoch. 
Note that in the spec the effective - // balance update does not happen until *after* the registry update, so we don't need to - // account for changes to the effective balance that would push it below the ejection - // balance here. - let will_be_exited = validator.is_active_at(current_epoch) - && validator.effective_balance <= spec.ejection_balance; - if already_exited || will_be_exited { - if next_epoch <= validator.withdrawable_epoch { - deposits_to_postpone.push(deposit.clone()); - } else { - // Deposited balance will never become active. Increase balance but do not - // consume churn. - validator_deposits_to_process - .entry(deposit.index as usize) - .or_insert(0) - .safe_add_assign(deposit.amount)?; - } - } else { - // Deposit does not fit in the churn, no more deposit processing in this epoch. - if processed_amount.safe_add(deposit.amount)? > available_for_processing { - break; - } - // Deposit fits in the churn, process it. Increase balance and consume churn. + let mut is_validator_exited = false; + let mut is_validator_withdrawn = false; + let opt_validator_index = state.pubkey_cache().get(&deposit.pubkey); + if let Some(validator_index) = opt_validator_index { + let validator = state.get_validator(validator_index)?; + let already_exited = validator.exit_epoch < spec.far_future_epoch; + // In the spec process_registry_updates is called before process_pending_deposits + // so we must account for process_registry_updates ejecting the validator for low balance + // and setting the exit_epoch to < far_future_epoch. Note that in the spec the effective + // balance update does not happen until *after* the registry update, so we don't need to + // account for changes to the effective balance that would push it below the ejection + // balance here. + // Note: we only consider this if registry_updates are enabled in the config. + // EF tests require us to run epoch_processing functions in isolation. 
+ let will_be_exited = config.registry_updates + && (validator.is_active_at(current_epoch) + && validator.effective_balance <= spec.ejection_balance); + is_validator_exited = already_exited || will_be_exited; + is_validator_withdrawn = validator.withdrawable_epoch < next_epoch; + } + + if is_validator_withdrawn { + // Deposited balance will never become active. Queue a balance increase but do not + // consume churn. Validator index must be known if the validator is known to be + // withdrawn (see calculation of `is_validator_withdrawn` above). + let validator_index = + opt_validator_index.ok_or(Error::PendingDepositsLogicError)?; validator_deposits_to_process - .entry(deposit.index as usize) + .entry(validator_index) .or_insert(0) .safe_add_assign(deposit.amount)?; + } else if is_validator_exited { + // Validator is exiting, postpone the deposit until after withdrawable epoch + deposits_to_postpone.push(deposit.clone()); + } else { + // Check if deposit fits in the churn, otherwise, do no more deposit processing in this epoch. + is_churn_limit_reached = + processed_amount.safe_add(deposit.amount)? > available_for_processing; + if is_churn_limit_reached { + break; + } processed_amount.safe_add_assign(deposit.amount)?; + + // Deposit fits in the churn, process it. Increase balance and consume churn. + if let Some(validator_index) = state.pubkey_cache().get(&deposit.pubkey) { + validator_deposits_to_process + .entry(validator_index) + .or_insert(0) + .safe_add_assign(deposit.amount)?; + } else { + // The `PendingDeposit` is for a new validator + new_validator_deposits.push(deposit.clone()); + } } // Regardless of how the deposit was handled, we move on in the queue. next_deposit_index.safe_add_assign(1)?; } - let deposit_balance_to_consume = if next_deposit_index == pending_balance_deposits.len() { - 0 - } else { + // Accumulate churn only if the churn limit has been hit. 
+ let deposit_balance_to_consume = if is_churn_limit_reached { available_for_processing.safe_sub(processed_amount)? + } else { + 0 }; Ok(Self { @@ -893,14 +1007,15 @@ impl PendingBalanceDepositsContext { deposit_balance_to_consume, validator_deposits_to_process, deposits_to_postpone, + new_validator_deposits, }) } } -fn process_pending_balance_deposits_for_validator( +fn process_pending_deposits_for_validator( balance: &mut Cow, validator_info: &ValidatorInfo, - pending_balance_deposits_ctxt: &PendingBalanceDepositsContext, + pending_balance_deposits_ctxt: &PendingDepositsContext, ) -> Result<(), Error> { if let Some(deposit_amount) = pending_balance_deposits_ctxt .validator_deposits_to_process @@ -941,21 +1056,20 @@ fn process_pending_consolidations( break; } - // Calculate the active balance while we have the source validator loaded. This is a safe - // reordering. - let source_balance = *state - .balances() - .get(source_index) - .ok_or(BeaconStateError::UnknownValidator(source_index))?; - let active_balance = - source_validator.get_active_balance(source_balance, spec, state_ctxt.fork_name); - - // Churn any target excess active balance of target and raise its max. - state.switch_to_compounding_validator(target_index, spec)?; + // Calculate the consolidated balance + let max_effective_balance = + source_validator.get_max_effective_balance(spec, state_ctxt.fork_name); + let source_effective_balance = std::cmp::min( + *state + .balances() + .get(source_index) + .ok_or(BeaconStateError::UnknownValidator(source_index))?, + max_effective_balance, + ); // Move active balance to target. Excess balance is withdrawable. 
- decrease_balance(state, source_index, active_balance)?; - increase_balance(state, target_index, active_balance)?; + decrease_balance(state, source_index, source_effective_balance)?; + increase_balance(state, target_index, source_effective_balance)?; affected_validators.insert(source_index); affected_validators.insert(target_index); diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 1e532d9f10..1e64ef2897 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -1,8 +1,10 @@ +use bls::Signature; +use itertools::Itertools; use safe_arith::SafeArith; use std::mem; use types::{ BeaconState, BeaconStateElectra, BeaconStateError as Error, ChainSpec, Epoch, EpochCache, - EthSpec, Fork, + EthSpec, Fork, PendingDeposit, }; /// Transform a `Deneb` state into an `Electra` state. @@ -38,29 +40,44 @@ pub fn upgrade_to_electra( // Add validators that are not yet active to pending balance deposits let validators = post.validators().clone(); - let mut pre_activation = validators + let pre_activation = validators .iter() .enumerate() .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) + .sorted_by_key(|(index, validator)| (validator.activation_eligibility_epoch, *index)) .collect::>(); - // Sort the indices by activation_eligibility_epoch and then by index - pre_activation.sort_by(|(index_a, val_a), (index_b, val_b)| { - if val_a.activation_eligibility_epoch == val_b.activation_eligibility_epoch { - index_a.cmp(index_b) - } else { - val_a - .activation_eligibility_epoch - .cmp(&val_b.activation_eligibility_epoch) - } - }); - // Process validators to queue entire balance and reset them for (index, _) in pre_activation { - post.queue_entire_balance_and_reset_validator(index, spec)?; + let balance = post + .balances_mut() + .get_mut(index) + .ok_or(Error::UnknownValidator(index))?; + let balance_copy = *balance; + *balance = 
0_u64; + + let validator = post + .validators_mut() + .get_mut(index) + .ok_or(Error::UnknownValidator(index))?; + validator.effective_balance = 0; + validator.activation_eligibility_epoch = spec.far_future_epoch; + let pubkey = validator.pubkey; + let withdrawal_credentials = validator.withdrawal_credentials; + + post.pending_deposits_mut()? + .push(PendingDeposit { + pubkey, + withdrawal_credentials, + amount: balance_copy, + signature: Signature::infinity()?.into(), + slot: spec.genesis_slot, + }) + .map_err(Error::MilhouseError)?; } // Ensure early adopters of compounding credentials go through the activation churn + let validators = post.validators().clone(); for (index, validator) in validators.iter().enumerate() { if validator.has_compounding_withdrawal_credential(spec) { post.queue_excess_active_balance(index, spec)?; @@ -137,7 +154,7 @@ pub fn upgrade_state_to_electra( earliest_exit_epoch, consolidation_balance_to_consume: 0, earliest_consolidation_epoch, - pending_balance_deposits: Default::default(), + pending_deposits: Default::default(), pending_partial_withdrawals: Default::default(), pending_consolidations: Default::default(), // Caches diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 223b12e768..6edd8d3892 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -146,4 +146,4 @@ impl AggregateAndProof { } impl SignedRoot for AggregateAndProof {} -impl<'a, E: EthSpec> SignedRoot for AggregateAndProofRef<'a, E> {} +impl SignedRoot for AggregateAndProofRef<'_, E> {} diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 3801a2b5d2..190964736f 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -233,7 +233,7 @@ impl Attestation { } } -impl<'a, E: EthSpec> AttestationRef<'a, E> { +impl AttestationRef<'_, E> { pub fn clone_as_attestation(self) -> Attestation { match self { 
Self::Base(att) => Attestation::Base(att.clone()), @@ -422,7 +422,7 @@ impl SlotData for Attestation { } } -impl<'a, E: EthSpec> SlotData for AttestationRef<'a, E> { +impl SlotData for AttestationRef<'_, E> { fn get_slot(&self) -> Slot { self.data().slot } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index a298303513..801b7dd1c7 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -80,10 +80,7 @@ pub struct BeaconBlock = FullPayload pub type BlindedBeaconBlock = BeaconBlock>; impl> SignedRoot for BeaconBlock {} -impl<'a, E: EthSpec, Payload: AbstractExecPayload> SignedRoot - for BeaconBlockRef<'a, E, Payload> -{ -} +impl> SignedRoot for BeaconBlockRef<'_, E, Payload> {} /// Empty block trait for each block variant to implement. pub trait EmptyBlock { diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index e04eedbb77..07cf6f41de 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -145,7 +145,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, } } - fn body_merkle_leaves(&self) -> Vec { + pub(crate) fn body_merkle_leaves(&self) -> Vec { let mut leaves = vec![]; match self { Self::Base(body) => { @@ -176,57 +176,71 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, leaves } - /// Produces the proof of inclusion for a `KzgCommitment` in `self.blob_kzg_commitments` - /// at `index`. + /// Calculate a KZG commitment merkle proof. + /// + /// Prefer to use `complete_kzg_commitment_merkle_proof` with a reused proof for the + /// `blob_kzg_commitments` field. pub fn kzg_commitment_merkle_proof( &self, index: usize, ) -> Result, Error> { - // We compute the branches by generating 2 merkle trees: - // 1. Merkle tree for the `blob_kzg_commitments` List object - // 2. 
Merkle tree for the `BeaconBlockBody` container - // We then merge the branches for both the trees all the way up to the root. + let kzg_commitments_proof = self.kzg_commitments_merkle_proof()?; + let proof = self.complete_kzg_commitment_merkle_proof(index, &kzg_commitments_proof)?; + Ok(proof) + } - // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) - // - // Branches for `blob_kzg_commitments` without length mix-in - let blob_leaves = self - .blob_kzg_commitments()? - .iter() - .map(|commitment| commitment.tree_hash_root()) - .collect::>(); - let depth = E::max_blob_commitments_per_block() - .next_power_of_two() - .ilog2(); - let tree = MerkleTree::create(&blob_leaves, depth as usize); - let (_, mut proof) = tree - .generate_proof(index, depth as usize) - .map_err(Error::MerkleTreeError)?; + /// Produces the proof of inclusion for a `KzgCommitment` in `self.blob_kzg_commitments` + /// at `index` using an existing proof for the `blob_kzg_commitments` field. + pub fn complete_kzg_commitment_merkle_proof( + &self, + index: usize, + kzg_commitments_proof: &[Hash256], + ) -> Result, Error> { + match self { + Self::Base(_) | Self::Altair(_) | Self::Bellatrix(_) | Self::Capella(_) => { + Err(Error::IncorrectStateVariant) + } + Self::Deneb(_) | Self::Electra(_) => { + // We compute the branches by generating 2 merkle trees: + // 1. Merkle tree for the `blob_kzg_commitments` List object + // 2. Merkle tree for the `BeaconBlockBody` container + // We then merge the branches for both the trees all the way up to the root. - // Add the branch corresponding to the length mix-in. - let length = blob_leaves.len(); - let usize_len = std::mem::size_of::(); - let mut length_bytes = [0; BYTES_PER_CHUNK]; - length_bytes - .get_mut(0..usize_len) - .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? 
- .copy_from_slice(&length.to_le_bytes()); - let length_root = Hash256::from_slice(length_bytes.as_slice()); - proof.push(length_root); + // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) + // + // Branches for `blob_kzg_commitments` without length mix-in + let blob_leaves = self + .blob_kzg_commitments()? + .iter() + .map(|commitment| commitment.tree_hash_root()) + .collect::>(); + let depth = E::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2(); + let tree = MerkleTree::create(&blob_leaves, depth as usize); + let (_, mut proof) = tree + .generate_proof(index, depth as usize) + .map_err(Error::MerkleTreeError)?; - // Part 2 - // Branches for `BeaconBlockBody` container - let body_leaves = self.body_merkle_leaves(); - let beacon_block_body_depth = body_leaves.len().next_power_of_two().ilog2() as usize; - let tree = MerkleTree::create(&body_leaves, beacon_block_body_depth); - let (_, mut proof_body) = tree - .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; - // Join the proofs for the subtree and the main tree - proof.append(&mut proof_body); - debug_assert_eq!(proof.len(), E::kzg_proof_inclusion_proof_depth()); + // Add the branch corresponding to the length mix-in. + let length = blob_leaves.len(); + let usize_len = std::mem::size_of::(); + let mut length_bytes = [0; BYTES_PER_CHUNK]; + length_bytes + .get_mut(0..usize_len) + .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? + .copy_from_slice(&length.to_le_bytes()); + let length_root = Hash256::from_slice(length_bytes.as_slice()); + proof.push(length_root); - Ok(proof.into()) + // Part 2 + // Branches for `BeaconBlockBody` container + // Join the proofs for the subtree and the main tree + proof.extend_from_slice(kzg_commitments_proof); + + Ok(FixedVector::new(proof)?) + } + } } /// Produces the proof of inclusion for `self.blob_kzg_commitments`. 
@@ -239,7 +253,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, let (_, proof) = tree .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) .map_err(Error::MerkleTreeError)?; - Ok(proof.into()) + Ok(FixedVector::new(proof)?) } pub fn block_body_merkle_proof(&self, generalized_index: usize) -> Result, Error> { @@ -364,7 +378,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRefMut<'a, } } -impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Payload> { +impl> BeaconBlockBodyRef<'_, E, Payload> { /// Get the fork_name of this object pub fn fork_name(self) -> ForkName { match self { diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/beacon_committee.rs index ad293c3a3b..bdb91cd6e6 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/beacon_committee.rs @@ -7,7 +7,7 @@ pub struct BeaconCommittee<'a> { pub committee: &'a [usize], } -impl<'a> BeaconCommittee<'a> { +impl BeaconCommittee<'_> { pub fn into_owned(self) -> OwnedBeaconCommittee { OwnedBeaconCommittee { slot: self.slot, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index d772cb23b3..ad4484b86a 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -59,6 +59,7 @@ pub enum Error { UnknownValidator(usize), UnableToDetermineProducer, InvalidBitfield, + EmptyCommittee, ValidatorIsWithdrawable, ValidatorIsInactive { val_index: usize, @@ -155,7 +156,6 @@ pub enum Error { current_fork: ForkName, }, TotalActiveBalanceDiffUninitialized, - MissingImmutableValidator(usize), IndexNotSupported(usize), InvalidFlagIndex(usize), MerkleTreeError(merkle_proof::MerkleTreeError), @@ -510,7 +510,7 @@ where #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] - pub pending_balance_deposits: List, + pub pending_deposits: List, #[compare_fields(as_iter)] #[test_random(default)] 
#[superstruct(only(Electra))] @@ -1548,19 +1548,23 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + /// Add a validator to the registry and return the validator index that was allocated for it. pub fn add_validator_to_registry( &mut self, - deposit_data: &DepositData, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, + amount: u64, spec: &ChainSpec, - ) -> Result<(), Error> { - let fork = self.fork_name_unchecked(); - let amount = if fork.electra_enabled() { - 0 - } else { - deposit_data.amount - }; - self.validators_mut() - .push(Validator::from_deposit(deposit_data, amount, fork, spec))?; + ) -> Result { + let index = self.validators().len(); + let fork_name = self.fork_name_unchecked(); + self.validators_mut().push(Validator::from_deposit( + pubkey, + withdrawal_credentials, + amount, + fork_name, + spec, + ))?; self.balances_mut().push(amount)?; // Altair or later initializations. @@ -1574,7 +1578,20 @@ impl BeaconState { inactivity_scores.push(0)?; } - Ok(()) + // Keep the pubkey cache up to date if it was up to date prior to this call. + // + // Doing this here while we know the pubkey and index is marginally quicker than doing it in + // a call to `update_pubkey_cache` later because we don't need to index into the validators + // tree again. + let pubkey_cache = self.pubkey_cache_mut(); + if pubkey_cache.len() == index { + let success = pubkey_cache.insert(pubkey, index); + if !success { + return Err(Error::PubkeyCacheInconsistent); + } + } + + Ok(index) } /// Safe copy-on-write accessor for the `validators` list. @@ -1781,19 +1798,6 @@ impl BeaconState { } } - /// Get the number of outstanding deposits. - /// - /// Returns `Err` if the state is invalid. 
- pub fn get_outstanding_deposit_len(&self) -> Result { - self.eth1_data() - .deposit_count - .checked_sub(self.eth1_deposit_index()) - .ok_or(Error::InvalidDepositState { - deposit_count: self.eth1_data().deposit_count, - deposit_index: self.eth1_deposit_index(), - }) - } - /// Build all caches (except the tree hash cache), if they need to be built. pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { self.build_all_committee_caches(spec)?; @@ -2150,27 +2154,6 @@ impl BeaconState { .map_err(Into::into) } - /// Get active balance for the given `validator_index`. - pub fn get_active_balance( - &self, - validator_index: usize, - spec: &ChainSpec, - current_fork: ForkName, - ) -> Result { - let max_effective_balance = self - .validators() - .get(validator_index) - .map(|validator| validator.get_max_effective_balance(spec, current_fork)) - .ok_or(Error::UnknownValidator(validator_index))?; - Ok(std::cmp::min( - *self - .balances() - .get(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?, - max_effective_balance, - )) - } - pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result { let mut pending_balance = 0; for withdrawal in self @@ -2197,42 +2180,18 @@ impl BeaconState { if *balance > spec.min_activation_balance { let excess_balance = balance.safe_sub(spec.min_activation_balance)?; *balance = spec.min_activation_balance; - self.pending_balance_deposits_mut()? 
- .push(PendingBalanceDeposit { - index: validator_index as u64, - amount: excess_balance, - })?; + let validator = self.get_validator(validator_index)?.clone(); + self.pending_deposits_mut()?.push(PendingDeposit { + pubkey: validator.pubkey, + withdrawal_credentials: validator.withdrawal_credentials, + amount: excess_balance, + signature: Signature::infinity()?.into(), + slot: spec.genesis_slot, + })?; } Ok(()) } - pub fn queue_entire_balance_and_reset_validator( - &mut self, - validator_index: usize, - spec: &ChainSpec, - ) -> Result<(), Error> { - let balance = self - .balances_mut() - .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; - let balance_copy = *balance; - *balance = 0_u64; - - let validator = self - .validators_mut() - .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; - validator.effective_balance = 0; - validator.activation_eligibility_epoch = spec.far_future_epoch; - - self.pending_balance_deposits_mut()? - .push(PendingBalanceDeposit { - index: validator_index as u64, - amount: balance_copy, - }) - .map_err(Into::into) - } - /// Change the withdrawal prefix of the given `validator_index` to the compounding withdrawal validator prefix. pub fn switch_to_compounding_validator( &mut self, @@ -2243,12 +2202,10 @@ impl BeaconState { .validators_mut() .get_mut(validator_index) .ok_or(Error::UnknownValidator(validator_index))?; - if validator.has_eth1_withdrawal_credential(spec) { - AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = - spec.compounding_withdrawal_prefix_byte; + AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = + spec.compounding_withdrawal_prefix_byte; - self.queue_excess_active_balance(validator_index, spec)?; - } + self.queue_excess_active_balance(validator_index, spec)?; Ok(()) } @@ -2506,33 +2463,64 @@ impl BeaconState { Ok(()) } - pub fn compute_merkle_proof(&self, generalized_index: usize) -> Result, Error> { - // 1. 
Convert generalized index to field index. - let field_index = match generalized_index { + pub fn compute_current_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { light_client_update::CURRENT_SYNC_COMMITTEE_INDEX - | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { - // Sync committees are top-level fields, subtract off the generalized indices - // for the internal nodes. Result should be 22 or 23, the field offset of the committee - // in the `BeaconState`: - // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate - generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - light_client_update::FINALIZED_ROOT_INDEX => { - // Finalized root is the right child of `finalized_checkpoint`, divide by two to get - // the generalized index of `state.finalized_checkpoint`. - let finalized_checkpoint_generalized_index = generalized_index / 2; - // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches - // position of `finalized_checkpoint` in `BeaconState`. - finalized_checkpoint_generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - _ => return Err(Error::IndexNotSupported(generalized_index)), }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } - // 2. Get all `BeaconState` leaves. 
+ pub fn compute_next_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } + + pub fn compute_finalized_root_proof(&self) -> Result, Error> { + // Finalized root is the right child of `finalized_checkpoint`, divide by two to get + // the generalized index of `state.finalized_checkpoint`. + let field_index = if self.fork_name_unchecked().electra_enabled() { + // Index should be 169/2 - 64 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + } else { + // Index should be 105/2 - 32 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + let mut proof = self.generate_proof(field_index, &leaves)?; + proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); + Ok(proof) + } + + fn generate_proof( + &self, + field_index: usize, + leaves: &[Hash256], + ) -> Result, Error> { + let depth = self.num_fields_pow2().ilog2() as usize; + let tree = merkle_proof::MerkleTree::create(leaves, depth); + let (_, proof) = tree.generate_proof(field_index, depth)?; + Ok(proof) + } + + fn get_beacon_state_leaves(&self) -> Vec { let mut leaves = vec![]; #[allow(clippy::arithmetic_side_effects)] match self { @@ -2568,18 +2556,7 @@ impl BeaconState { } }; - // 3. Make deposit tree. 
- // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). - let depth = light_client_update::CURRENT_SYNC_COMMITTEE_PROOF_LEN; - let tree = merkle_proof::MerkleTree::create(&leaves, depth); - let (_, mut proof) = tree.generate_proof(field_index, depth)?; - - // 4. If we're proving the finalized root, patch in the finalized epoch to complete the proof. - if generalized_index == light_client_update::FINALIZED_ROOT_INDEX { - proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); - } - - Ok(proof) + leaves } } diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/beacon_state/iter.rs index 2caa0365e0..d99c769e40 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/beacon_state/iter.rs @@ -27,7 +27,7 @@ impl<'a, E: EthSpec> BlockRootsIter<'a, E> { } } -impl<'a, E: EthSpec> Iterator for BlockRootsIter<'a, E> { +impl Iterator for BlockRootsIter<'_, E> { type Item = Result<(Slot, Hash256), Error>; fn next(&mut self) -> Option { diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 3ad3ccf561..bfa7bb86d2 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -307,43 +307,6 @@ mod committees { } } -mod get_outstanding_deposit_len { - use super::*; - - async fn state() -> BeaconState { - get_harness(16, Slot::new(0)) - .await - .chain - .head_beacon_state_cloned() - } - - #[tokio::test] - async fn returns_ok() { - let mut state = state().await; - assert_eq!(state.get_outstanding_deposit_len(), Ok(0)); - - state.eth1_data_mut().deposit_count = 17; - *state.eth1_deposit_index_mut() = 16; - assert_eq!(state.get_outstanding_deposit_len(), Ok(1)); - } - - #[tokio::test] - async fn returns_err_if_the_state_is_invalid() { - let mut state = state().await; - // The state is invalid, deposit count is lower than deposit index. 
- state.eth1_data_mut().deposit_count = 16; - *state.eth1_deposit_index_mut() = 17; - - assert_eq!( - state.get_outstanding_deposit_len(), - Err(BeaconStateError::InvalidDepositState { - deposit_count: 16, - deposit_index: 17, - }) - ); - } -} - #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 9acbd8d95c..67ca5cbd56 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -140,6 +140,37 @@ impl BlobSidecar { }) } + pub fn new_with_existing_proof( + index: usize, + blob: Blob, + signed_block: &SignedBeaconBlock, + signed_block_header: SignedBeaconBlockHeader, + kzg_commitments_inclusion_proof: &[Hash256], + kzg_proof: KzgProof, + ) -> Result { + let expected_kzg_commitments = signed_block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_e| BlobSidecarError::PreDeneb)?; + let kzg_commitment = *expected_kzg_commitments + .get(index) + .ok_or(BlobSidecarError::MissingKzgCommitment)?; + let kzg_commitment_inclusion_proof = signed_block + .message() + .body() + .complete_kzg_commitment_merkle_proof(index, kzg_commitments_inclusion_proof)?; + + Ok(Self { + index: index as u64, + blob, + kzg_commitment, + kzg_proof, + signed_block_header, + kzg_commitment_inclusion_proof, + }) + } + pub fn id(&self) -> BlobIdentifier { BlobIdentifier { block_root: self.block_root(), diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index bb963a6d63..c1d87354a8 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -127,6 +127,11 @@ pub struct ChainSpec { pub deposit_network_id: u64, pub deposit_contract_address: Address, + /* + * Execution Specs + */ + pub gas_limit_adjustment_factor: u64, + /* * Altair hard fork params */ @@ -204,7 +209,6 @@ pub struct ChainSpec { pub target_aggregators_per_committee: u64, pub gossip_max_size: u64, pub max_request_blocks: u64, - pub 
epochs_per_subnet_subscription: u64, pub min_epochs_for_block_requests: u64, pub max_chunk_size: u64, pub ttfb_timeout: u64, @@ -215,9 +219,7 @@ pub struct ChainSpec { pub message_domain_valid_snappy: [u8; 4], pub subnets_per_node: u8, pub attestation_subnet_count: u64, - pub attestation_subnet_extra_bits: u8, pub attestation_subnet_prefix_bits: u8, - pub attestation_subnet_shuffling_prefix_bits: u8, /* * Networking Deneb @@ -729,6 +731,11 @@ impl ChainSpec { .parse() .expect("chain spec deposit contract address"), + /* + * Execution Specs + */ + gas_limit_adjustment_factor: 1024, + /* * Altair hard fork params */ @@ -827,7 +834,6 @@ impl ChainSpec { subnets_per_node: 2, maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - epochs_per_subnet_subscription: default_epochs_per_subnet_subscription(), gossip_max_size: default_gossip_max_size(), min_epochs_for_block_requests: default_min_epochs_for_block_requests(), max_chunk_size: default_max_chunk_size(), @@ -835,10 +841,7 @@ impl ChainSpec { resp_timeout: default_resp_timeout(), message_domain_invalid_snappy: default_message_domain_invalid_snappy(), message_domain_valid_snappy: default_message_domain_valid_snappy(), - attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), - attestation_subnet_shuffling_prefix_bits: - default_attestation_subnet_shuffling_prefix_bits(), max_request_blocks: default_max_request_blocks(), /* @@ -1048,6 +1051,11 @@ impl ChainSpec { .parse() .expect("chain spec deposit contract address"), + /* + * Execution Specs + */ + gas_limit_adjustment_factor: 1024, + /* * Altair hard fork params */ @@ -1145,7 +1153,6 @@ impl ChainSpec { subnets_per_node: 4, // Make this larger than usual to avoid network damage maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - 
epochs_per_subnet_subscription: default_epochs_per_subnet_subscription(), gossip_max_size: default_gossip_max_size(), min_epochs_for_block_requests: 33024, max_chunk_size: default_max_chunk_size(), @@ -1153,11 +1160,8 @@ impl ChainSpec { resp_timeout: default_resp_timeout(), message_domain_invalid_snappy: default_message_domain_invalid_snappy(), message_domain_valid_snappy: default_message_domain_valid_snappy(), - attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), - attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), - attestation_subnet_shuffling_prefix_bits: - default_attestation_subnet_shuffling_prefix_bits(), max_request_blocks: default_max_request_blocks(), + attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), /* * Networking Deneb Specific @@ -1309,15 +1313,16 @@ pub struct Config { #[serde(with = "serde_utils::address_hex")] deposit_contract_address: Address, + #[serde(default = "default_gas_limit_adjustment_factor")] + #[serde(with = "serde_utils::quoted_u64")] + gas_limit_adjustment_factor: u64, + #[serde(default = "default_gossip_max_size")] #[serde(with = "serde_utils::quoted_u64")] gossip_max_size: u64, #[serde(default = "default_max_request_blocks")] #[serde(with = "serde_utils::quoted_u64")] max_request_blocks: u64, - #[serde(default = "default_epochs_per_subnet_subscription")] - #[serde(with = "serde_utils::quoted_u64")] - epochs_per_subnet_subscription: u64, #[serde(default = "default_min_epochs_for_block_requests")] #[serde(with = "serde_utils::quoted_u64")] min_epochs_for_block_requests: u64, @@ -1342,15 +1347,9 @@ pub struct Config { #[serde(default = "default_message_domain_valid_snappy")] #[serde(with = "serde_utils::bytes_4_hex")] message_domain_valid_snappy: [u8; 4], - #[serde(default = "default_attestation_subnet_extra_bits")] - #[serde(with = "serde_utils::quoted_u8")] - attestation_subnet_extra_bits: u8, #[serde(default = "default_attestation_subnet_prefix_bits")] 
#[serde(with = "serde_utils::quoted_u8")] attestation_subnet_prefix_bits: u8, - #[serde(default = "default_attestation_subnet_shuffling_prefix_bits")] - #[serde(with = "serde_utils::quoted_u8")] - attestation_subnet_shuffling_prefix_bits: u8, #[serde(default = "default_max_request_blocks_deneb")] #[serde(with = "serde_utils::quoted_u64")] max_request_blocks_deneb: u64, @@ -1435,10 +1434,18 @@ fn default_subnets_per_node() -> u8 { 2u8 } +fn default_attestation_subnet_prefix_bits() -> u8 { + 6 +} + const fn default_max_per_epoch_activation_churn_limit() -> u64 { 8 } +const fn default_gas_limit_adjustment_factor() -> u64 { + 1024 +} + const fn default_gossip_max_size() -> u64 { 10485760 } @@ -1467,18 +1474,6 @@ const fn default_message_domain_valid_snappy() -> [u8; 4] { [1, 0, 0, 0] } -const fn default_attestation_subnet_extra_bits() -> u8 { - 0 -} - -const fn default_attestation_subnet_prefix_bits() -> u8 { - 6 -} - -const fn default_attestation_subnet_shuffling_prefix_bits() -> u8 { - 3 -} - const fn default_max_request_blocks() -> u64 { 1024 } @@ -1517,10 +1512,6 @@ const fn default_max_per_epoch_activation_exit_churn_limit() -> u64 { 256_000_000_000 } -const fn default_epochs_per_subnet_subscription() -> u64 { - 256 -} - const fn default_attestation_propagation_slot_range() -> u64 { 32 } @@ -1698,6 +1689,7 @@ impl Config { shard_committee_period: spec.shard_committee_period, eth1_follow_distance: spec.eth1_follow_distance, subnets_per_node: spec.subnets_per_node, + attestation_subnet_prefix_bits: spec.attestation_subnet_prefix_bits, inactivity_score_bias: spec.inactivity_score_bias, inactivity_score_recovery_rate: spec.inactivity_score_recovery_rate, @@ -1712,9 +1704,10 @@ impl Config { deposit_network_id: spec.deposit_network_id, deposit_contract_address: spec.deposit_contract_address, + gas_limit_adjustment_factor: spec.gas_limit_adjustment_factor, + gossip_max_size: spec.gossip_max_size, max_request_blocks: spec.max_request_blocks, - 
epochs_per_subnet_subscription: spec.epochs_per_subnet_subscription, min_epochs_for_block_requests: spec.min_epochs_for_block_requests, max_chunk_size: spec.max_chunk_size, ttfb_timeout: spec.ttfb_timeout, @@ -1723,9 +1716,6 @@ impl Config { maximum_gossip_clock_disparity_millis: spec.maximum_gossip_clock_disparity_millis, message_domain_invalid_snappy: spec.message_domain_invalid_snappy, message_domain_valid_snappy: spec.message_domain_valid_snappy, - attestation_subnet_extra_bits: spec.attestation_subnet_extra_bits, - attestation_subnet_prefix_bits: spec.attestation_subnet_prefix_bits, - attestation_subnet_shuffling_prefix_bits: spec.attestation_subnet_shuffling_prefix_bits, max_request_blocks_deneb: spec.max_request_blocks_deneb, max_request_blob_sidecars: spec.max_request_blob_sidecars, max_request_data_column_sidecars: spec.max_request_data_column_sidecars, @@ -1780,6 +1770,7 @@ impl Config { shard_committee_period, eth1_follow_distance, subnets_per_node, + attestation_subnet_prefix_bits, inactivity_score_bias, inactivity_score_recovery_rate, ejection_balance, @@ -1790,6 +1781,7 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gas_limit_adjustment_factor, gossip_max_size, min_epochs_for_block_requests, max_chunk_size, @@ -1797,11 +1789,7 @@ impl Config { resp_timeout, message_domain_invalid_snappy, message_domain_valid_snappy, - attestation_subnet_extra_bits, - attestation_subnet_prefix_bits, - attestation_subnet_shuffling_prefix_bits, max_request_blocks, - epochs_per_subnet_subscription, attestation_propagation_slot_range, maximum_gossip_clock_disparity_millis, max_request_blocks_deneb, @@ -1856,6 +1844,7 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gas_limit_adjustment_factor, terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, @@ -1866,11 +1855,8 @@ impl Config { resp_timeout, message_domain_invalid_snappy, message_domain_valid_snappy, - 
attestation_subnet_extra_bits, attestation_subnet_prefix_bits, - attestation_subnet_shuffling_prefix_bits, max_request_blocks, - epochs_per_subnet_subscription, attestation_propagation_slot_range, maximum_gossip_clock_disparity_millis, max_request_blocks_deneb, @@ -2167,9 +2153,7 @@ mod yaml_tests { check_default!(resp_timeout); check_default!(message_domain_invalid_snappy); check_default!(message_domain_valid_snappy); - check_default!(attestation_subnet_extra_bits); check_default!(attestation_subnet_prefix_bits); - check_default!(attestation_subnet_shuffling_prefix_bits); assert_eq!(chain_spec.bellatrix_fork_epoch, None); } diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index bfc3655880..b2a050e9d5 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,7 +1,7 @@ use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; use crate::test_utils::TestRandom; use crate::BeaconStateError; -use crate::{BeaconBlockHeader, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; +use crate::{BeaconBlockHeader, Epoch, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; use bls::Signature; use derivative::Derivative; use kzg::Error as KzgError; @@ -67,6 +67,10 @@ impl DataColumnSidecar { self.signed_block_header.message.slot } + pub fn epoch(&self) -> Epoch { + self.slot().epoch(E::slots_per_epoch()) + } + pub fn block_root(&self) -> Hash256 { self.signed_block_header.message.tree_hash_root() } diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit_request.rs index 7af949fef3..a21760551b 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit_request.rs @@ -1,5 +1,6 @@ use crate::test_utils::TestRandom; -use crate::{Hash256, PublicKeyBytes, Signature}; +use crate::{Hash256, PublicKeyBytes}; +use bls::SignatureBytes; use serde::{Deserialize, Serialize}; use ssz::Encode; use 
ssz_derive::{Decode, Encode}; @@ -10,7 +11,6 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, - Eq, Hash, Clone, Serialize, @@ -25,7 +25,7 @@ pub struct DepositRequest { pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, - pub signature: Signature, + pub signature: SignatureBytes, #[serde(with = "serde_utils::quoted_u64")] pub index: u64, } @@ -36,7 +36,7 @@ impl DepositRequest { pubkey: PublicKeyBytes::empty(), withdrawal_credentials: Hash256::ZERO, amount: 0, - signature: Signature::empty(), + signature: SignatureBytes::empty(), index: 0, } .as_ssz_bytes() diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 3d496d214e..976766dfa9 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -149,7 +149,7 @@ pub trait EthSpec: /* * New in Electra */ - type PendingBalanceDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type PendingDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingPartialWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingConsolidationsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxConsolidationRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -157,6 +157,7 @@ pub trait EthSpec: type MaxAttesterSlashingsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxAttestationsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxWithdrawalRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxPendingDepositsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; fn default_spec() -> ChainSpec; @@ -324,9 +325,9 @@ pub trait EthSpec: .expect("Preset values are not configurable and never result in non-positive block body depth") } - /// Returns the `PENDING_BALANCE_DEPOSITS_LIMIT` constant for this specification. 
- fn pending_balance_deposits_limit() -> usize { - Self::PendingBalanceDepositsLimit::to_usize() + /// Returns the `PENDING_DEPOSITS_LIMIT` constant for this specification. + fn pending_deposits_limit() -> usize { + Self::PendingDepositsLimit::to_usize() } /// Returns the `PENDING_PARTIAL_WITHDRAWALS_LIMIT` constant for this specification. @@ -364,6 +365,11 @@ pub trait EthSpec: Self::MaxWithdrawalRequestsPerPayload::to_usize() } + /// Returns the `MAX_PENDING_DEPOSITS_PER_EPOCH` constant for this specification. + fn max_pending_deposits_per_epoch() -> usize { + Self::MaxPendingDepositsPerEpoch::to_usize() + } + fn kzg_commitments_inclusion_proof_depth() -> usize { Self::KzgCommitmentsInclusionProofDepth::to_usize() } @@ -422,7 +428,7 @@ impl EthSpec for MainnetEthSpec { type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U16; - type PendingBalanceDepositsLimit = U134217728; + type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidationRequestsPerPayload = U1; @@ -430,6 +436,7 @@ impl EthSpec for MainnetEthSpec { type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; + type MaxPendingDepositsPerEpoch = U16; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -491,7 +498,8 @@ impl EthSpec for MinimalEthSpec { MaxExtraDataBytes, MaxBlsToExecutionChanges, BytesPerFieldElement, - PendingBalanceDepositsLimit, + PendingDepositsLimit, + MaxPendingDepositsPerEpoch, MaxConsolidationRequestsPerPayload, MaxAttesterSlashingsElectra, MaxAttestationsElectra @@ -547,7 +555,7 @@ impl EthSpec for GnosisEthSpec { type BytesPerFieldElement = U32; type BytesPerBlob = U131072; type KzgCommitmentInclusionProofDepth = U17; - type PendingBalanceDepositsLimit = U134217728; + type PendingDepositsLimit = U134217728; type 
PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidationRequestsPerPayload = U1; @@ -555,6 +563,7 @@ impl EthSpec for GnosisEthSpec { type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; + type MaxPendingDepositsPerEpoch = U16; type FieldElementsPerCell = U64; type FieldElementsPerExtBlob = U8192; type BytesPerCell = U2048; diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index 694162d6ff..60f2960afb 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -52,9 +52,11 @@ pub struct ExecutionBlockHeader { pub blob_gas_used: Option, pub excess_blob_gas: Option, pub parent_beacon_block_root: Option, + pub requests_root: Option, } impl ExecutionBlockHeader { + #[allow(clippy::too_many_arguments)] pub fn from_payload( payload: ExecutionPayloadRef, rlp_empty_list_root: Hash256, @@ -63,6 +65,7 @@ impl ExecutionBlockHeader { rlp_blob_gas_used: Option, rlp_excess_blob_gas: Option, rlp_parent_beacon_block_root: Option, + rlp_requests_root: Option, ) -> Self { // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is // defined in EIP-4399. 
@@ -87,6 +90,7 @@ impl ExecutionBlockHeader { blob_gas_used: rlp_blob_gas_used, excess_blob_gas: rlp_excess_blob_gas, parent_beacon_block_root: rlp_parent_beacon_block_root, + requests_root: rlp_requests_root, } } } @@ -114,6 +118,7 @@ pub struct EncodableExecutionBlockHeader<'a> { pub blob_gas_used: Option, pub excess_blob_gas: Option, pub parent_beacon_block_root: Option<&'a [u8]>, + pub requests_root: Option<&'a [u8]>, } impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { @@ -139,6 +144,7 @@ impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: None, + requests_root: None, }; if let Some(withdrawals_root) = &header.withdrawals_root { encodable.withdrawals_root = Some(withdrawals_root.as_slice()); @@ -146,6 +152,9 @@ impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { if let Some(parent_beacon_block_root) = &header.parent_beacon_block_root { encodable.parent_beacon_block_root = Some(parent_beacon_block_root.as_slice()) } + if let Some(requests_root) = &header.requests_root { + encodable.requests_root = Some(requests_root.as_slice()) + } encodable } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index e9690435f1..4bfbfee9bf 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -371,7 +371,7 @@ impl TryFrom> for ExecutionPayloadHeaderDe } } -impl<'a, E: EthSpec> ExecutionPayloadHeaderRefMut<'a, E> { +impl ExecutionPayloadHeaderRefMut<'_, E> { /// Mutate through pub fn replace(self, header: ExecutionPayloadHeader) -> Result<(), BeaconStateError> { match self { diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs index 778260dd84..96a3905420 100644 --- a/consensus/types/src/execution_requests.rs +++ 
b/consensus/types/src/execution_requests.rs @@ -1,7 +1,8 @@ use crate::test_utils::TestRandom; -use crate::{ConsolidationRequest, DepositRequest, EthSpec, WithdrawalRequest}; +use crate::{ConsolidationRequest, DepositRequest, EthSpec, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; use derivative::Derivative; +use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -47,6 +48,43 @@ impl ExecutionRequests { let consolidation_bytes = Bytes::from(self.consolidations.as_ssz_bytes()); vec![deposit_bytes, withdrawal_bytes, consolidation_bytes] } + + /// Generate the execution layer `requests_hash` based on EIP-7685. + /// + /// `sha256(sha256(requests_0) ++ sha256(requests_1) ++ ...)` + pub fn requests_hash(&self) -> Hash256 { + let mut hasher = DynamicContext::new(); + + for (i, request) in self.get_execution_requests_list().iter().enumerate() { + let mut request_hasher = DynamicContext::new(); + request_hasher.update(&[i as u8]); + request_hasher.update(request); + let request_hash = request_hasher.finalize(); + + hasher.update(&request_hash); + } + + hasher.finalize().into() + } +} + +/// This is used to index into the `execution_requests` array. 
+#[derive(Debug, Copy, Clone)] +pub enum RequestPrefix { + Deposit, + Withdrawal, + Consolidation, +} + +impl RequestPrefix { + pub fn from_prefix(prefix: u8) -> Option { + match prefix { + 0 => Some(Self::Deposit), + 1 => Some(Self::Withdrawal), + 2 => Some(Self::Consolidation), + _ => None, + } + } } #[cfg(test)] diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs index 76bb111ea2..8c82d52b81 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/historical_summary.rs @@ -15,6 +15,7 @@ use tree_hash_derive::TreeHash; #[derive( Debug, PartialEq, + Eq, Serialize, Deserialize, Encode, diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 9274600ed2..f3243a9f05 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -134,7 +134,7 @@ impl IndexedAttestation { } } -impl<'a, E: EthSpec> IndexedAttestationRef<'a, E> { +impl IndexedAttestationRef<'_, E> { pub fn is_double_vote(&self, other: Self) -> bool { self.data().target.epoch == other.data().target.epoch && self.data() != other.data() } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index e168199b98..dd304c6296 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -54,8 +54,8 @@ pub mod light_client_finality_update; pub mod light_client_optimistic_update; pub mod light_client_update; pub mod pending_attestation; -pub mod pending_balance_deposit; pub mod pending_consolidation; +pub mod pending_deposit; pub mod pending_partial_withdrawal; pub mod proposer_preparation_data; pub mod proposer_slashing; @@ -170,7 +170,7 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; -pub use crate::execution_requests::ExecutionRequests; +pub use crate::execution_requests::{ExecutionRequests, 
RequestPrefix}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; @@ -200,7 +200,7 @@ pub use crate::light_client_optimistic_update::{ }; pub use crate::light_client_update::{ Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, + LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, MerkleProof, }; pub use crate::participation_flags::ParticipationFlags; pub use crate::payload::{ @@ -210,8 +210,8 @@ pub use crate::payload::{ FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; -pub use crate::pending_balance_deposit::PendingBalanceDeposit; pub use crate::pending_consolidation::PendingConsolidation; +pub use crate::pending_deposit::PendingDeposit; pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; pub use crate::preset::{ AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 25f029bcc0..21a7e5416f 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -57,7 +57,16 @@ pub struct LightClientBootstrap { /// The `SyncCommittee` used in the requested period. 
pub current_sync_committee: Arc>, /// Merkle proof for sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "current_sync_committee_branch_altair") + )] pub current_sync_committee_branch: FixedVector, + #[superstruct( + only(Electra), + partial_getter(rename = "current_sync_committee_branch_electra") + )] + pub current_sync_committee_branch: FixedVector, } impl LightClientBootstrap { @@ -115,7 +124,7 @@ impl LightClientBootstrap { pub fn new( block: &SignedBlindedBeaconBlock, current_sync_committee: Arc>, - current_sync_committee_branch: FixedVector, + current_sync_committee_branch: Vec, chain_spec: &ChainSpec, ) -> Result { let light_client_bootstrap = match block @@ -126,22 +135,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -155,9 +164,7 @@ impl LightClientBootstrap { ) -> Result { let mut header = beacon_state.latest_block_header().clone(); header.state_root = 
beacon_state.update_tree_hash_cache()?; - let current_sync_committee_branch = - FixedVector::new(beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?)?; - + let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = beacon_state.current_sync_committee()?.clone(); let light_client_bootstrap = match block @@ -168,22 +175,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -210,8 +217,28 @@ impl ForkVersionDeserialize for LightClientBootstrap { #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientBootstrapAltair, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapAltair); + } - ssz_tests!(LightClientBootstrapDeneb); + #[cfg(test)] + mod capella { + use crate::{LightClientBootstrapCapella, MainnetEthSpec}; + 
ssz_tests!(LightClientBootstrapCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientBootstrapDeneb, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientBootstrapElectra, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapElectra); + } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 91ee58b4be..ba2f2083cd 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -63,8 +63,13 @@ pub struct LightClientFinalityUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. - #[test_random(default)] + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FixedVector, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FixedVector, /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -75,7 +80,7 @@ impl LightClientFinalityUpdate { pub fn new( attested_block: &SignedBlindedBeaconBlock, finalized_block: &SignedBlindedBeaconBlock, - finality_branch: FixedVector, + finality_branch: Vec, sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, @@ -92,7 +97,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }) @@ -104,7 +109,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -115,7 
+120,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -126,7 +131,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -226,8 +231,28 @@ impl ForkVersionDeserialize for LightClientFinalityUpdate { #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientFinalityUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateAltair); + } - ssz_tests!(LightClientFinalityUpdateDeneb); + #[cfg(test)] + mod capella { + use crate::{LightClientFinalityUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientFinalityUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientFinalityUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateElectra); + } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index fecdc39533..6655e0a093 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -179,12 +179,12 @@ impl LightClientHeaderCapella { .to_ref() .block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; - return Ok(LightClientHeaderCapella { + Ok(LightClientHeaderCapella { beacon: block.message().block_header(), execution: header, execution_branch: FixedVector::new(execution_branch)?, _phantom_data: PhantomData, - }); + }) } } @@ -307,3 +307,31 @@ impl ForkVersionDeserialize for LightClientHeader { } } } + +#[cfg(test)] 
+mod tests { + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientHeaderAltair, MainnetEthSpec}; + ssz_tests!(LightClientHeaderAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientHeaderCapella, MainnetEthSpec}; + ssz_tests!(LightClientHeaderCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientHeaderDeneb, MainnetEthSpec}; + ssz_tests!(LightClientHeaderDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientHeaderElectra, MainnetEthSpec}; + ssz_tests!(LightClientHeaderElectra); + } +} diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 2f8cc034eb..209388af87 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -214,8 +214,28 @@ impl ForkVersionDeserialize for LightClientOptimisticUpdate { #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientOptimisticUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateAltair); + } - ssz_tests!(LightClientOptimisticUpdateDeneb); + #[cfg(test)] + mod capella { + use crate::{LightClientOptimisticUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientOptimisticUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientOptimisticUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateElectra); + } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 1f5592a929..c3a50e71c1 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -14,7 +14,7 @@ use 
serde_json::Value; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; -use ssz_types::typenum::{U4, U5, U6}; +use ssz_types::typenum::{U4, U5, U6, U7}; use std::sync::Arc; use superstruct::superstruct; use test_random_derive::TestRandom; @@ -25,24 +25,39 @@ pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; pub const EXECUTION_PAYLOAD_INDEX: usize = 25; +pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; +pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; +pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; + pub type FinalizedRootProofLen = U6; pub type CurrentSyncCommitteeProofLen = U5; pub type ExecutionPayloadProofLen = U4; - pub type NextSyncCommitteeProofLen = U5; +pub type FinalizedRootProofLenElectra = U7; +pub type CurrentSyncCommitteeProofLenElectra = U6; +pub type NextSyncCommitteeProofLenElectra = U6; + pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; +pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; + +pub type MerkleProof = Vec; // Max light client updates by range request limits // spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; type FinalityBranch = FixedVector; +type FinalityBranchElectra = FixedVector; type NextSyncCommitteeBranch = FixedVector; +type NextSyncCommitteeBranchElectra = FixedVector; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), @@ -124,8 +139,17 @@ pub struct LightClientUpdate { pub attested_header: LightClientHeaderElectra, /// The `SyncCommittee` used in the next period. 
pub next_sync_committee: Arc>, - /// Merkle proof for next sync committee + // Merkle proof for next sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "next_sync_committee_branch_altair") + )] pub next_sync_committee_branch: NextSyncCommitteeBranch, + #[superstruct( + only(Electra), + partial_getter(rename = "next_sync_committee_branch_electra") + )] + pub next_sync_committee_branch: NextSyncCommitteeBranchElectra, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). #[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -136,7 +160,13 @@ pub struct LightClientUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FinalityBranch, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FinalityBranchElectra, /// current sync aggreggate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -165,8 +195,8 @@ impl LightClientUpdate { sync_aggregate: &SyncAggregate, block_slot: Slot, next_sync_committee: Arc>, - next_sync_committee_branch: FixedVector, - finality_branch: FixedVector, + next_sync_committee_branch: Vec, + finality_branch: Vec, attested_block: &SignedBlindedBeaconBlock, finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, @@ -189,9 +219,9 @@ impl LightClientUpdate { Self::Altair(LightClientUpdateAltair { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), 
signature_slot: block_slot, }) @@ -209,9 +239,9 @@ impl LightClientUpdate { Self::Capella(LightClientUpdateCapella { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -229,9 +259,9 @@ impl LightClientUpdate { Self::Deneb(LightClientUpdateDeneb { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -249,9 +279,9 @@ impl LightClientUpdate { Self::Electra(LightClientUpdateElectra { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -388,25 +418,21 @@ impl LightClientUpdate { return Ok(new_attested_header_slot < prev_attested_header_slot); } - return Ok(new.signature_slot() < self.signature_slot()); + Ok(new.signature_slot() < self.signature_slot()) } - fn is_next_sync_committee_branch_empty(&self) -> bool { - for index in self.next_sync_committee_branch().iter() { - if *index != Hash256::default() { - return false; - } - } - true + fn is_next_sync_committee_branch_empty<'a>(&'a self) -> bool { + map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { + cons(update); + is_empty_branch(update.next_sync_committee_branch.as_ref()) + }) } - pub fn is_finality_branch_empty(&self) -> bool { - for index in self.finality_branch().iter() { - if *index != Hash256::default() { - return false; - } - } - true + pub fn is_finality_branch_empty<'a>(&'a self) -> bool { + 
map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { + cons(update); + is_empty_branch(update.finality_branch.as_ref()) + }) } // A `LightClientUpdate` has two `LightClientHeader`s @@ -436,6 +462,15 @@ impl LightClientUpdate { } } +fn is_empty_branch(branch: &[Hash256]) -> bool { + for index in branch.iter() { + if *index != Hash256::default() { + return false; + } + } + true +} + fn compute_sync_committee_period_at_slot( slot: Slot, chain_spec: &ChainSpec, @@ -447,16 +482,53 @@ fn compute_sync_committee_period_at_slot( #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; use ssz_types::typenum::Unsigned; - ssz_tests!(LightClientUpdateDeneb); + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateAltair); + } + + #[cfg(test)] + mod capella { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateElectra); + } #[test] fn finalized_root_params() { assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32 + 1) > FINALIZED_ROOT_INDEX); assert_eq!(FinalizedRootProofLen::to_usize(), FINALIZED_ROOT_PROOF_LEN); + + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32) <= FINALIZED_ROOT_INDEX_ELECTRA + ); + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32 + 1) > FINALIZED_ROOT_INDEX_ELECTRA + ); + assert_eq!( + FinalizedRootProofLenElectra::to_usize(), + FINALIZED_ROOT_PROOF_LEN_ELECTRA + ); } #[test] @@ -471,6 +543,19 @@ mod tests { CurrentSyncCommitteeProofLen::to_usize(), CURRENT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= 
CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + CurrentSyncCommitteeProofLenElectra::to_usize(), + CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } #[test] @@ -481,5 +566,18 @@ mod tests { NextSyncCommitteeProofLen::to_usize(), NEXT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + NextSyncCommitteeProofLenElectra::to_usize(), + NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } } diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 80a70c171f..e68801840a 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -32,6 +32,7 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn prev_randao(&self) -> Hash256; fn block_number(&self) -> u64; fn timestamp(&self) -> u64; + fn extra_data(&self) -> VariableList; fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; @@ -225,6 +226,13 @@ impl ExecPayload for FullPayload { }) } + fn extra_data<'a>(&'a self) -> VariableList { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -317,7 +325,7 @@ impl<'a, E: EthSpec> FullPayloadRef<'a, E> { } } -impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { +impl ExecPayload for FullPayloadRef<'_, E> { fn block_type() -> BlockType { BlockType::Full } @@ -357,6 +365,13 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { }) } + fn extra_data<'a>(&'a self) -> VariableList { 
+ map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -542,6 +557,13 @@ impl ExecPayload for BlindedPayload { }) } + fn extra_data<'a>(&'a self) -> VariableList::MaxExtraDataBytes> { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -643,6 +665,13 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { }) } + fn extra_data<'a>(&'a self) -> VariableList::MaxExtraDataBytes> { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_blinded_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -745,6 +774,10 @@ macro_rules! 
impl_exec_payload_common { self.$wrapped_field.timestamp } + fn extra_data(&self) -> VariableList { + self.$wrapped_field.extra_data.clone() + } + fn block_hash(&self) -> ExecutionBlockHash { self.$wrapped_field.block_hash } diff --git a/consensus/types/src/pending_balance_deposit.rs b/consensus/types/src/pending_deposit.rs similarity index 68% rename from consensus/types/src/pending_balance_deposit.rs rename to consensus/types/src/pending_deposit.rs index a2bce577f8..3bee86417d 100644 --- a/consensus/types/src/pending_balance_deposit.rs +++ b/consensus/types/src/pending_deposit.rs @@ -1,4 +1,5 @@ use crate::test_utils::TestRandom; +use crate::*; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -8,7 +9,6 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, - Eq, Hash, Clone, Serialize, @@ -18,16 +18,18 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] -pub struct PendingBalanceDeposit { - #[serde(with = "serde_utils::quoted_u64")] - pub index: u64, +pub struct PendingDeposit { + pub pubkey: PublicKeyBytes, + pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, + pub signature: SignatureBytes, + pub slot: Slot, } #[cfg(test)] mod tests { use super::*; - ssz_and_tree_hash_tests!(PendingBalanceDeposit); + ssz_and_tree_hash_tests!(PendingDeposit); } diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 11076ad626..949b6b2abe 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -260,7 +260,7 @@ impl ElectraPreset { whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, max_pending_partials_per_withdrawals_sweep: spec .max_pending_partials_per_withdrawals_sweep, - pending_balance_deposits_limit: E::pending_balance_deposits_limit() as u64, + pending_balance_deposits_limit: E::pending_deposits_limit() as u64, pending_partial_withdrawals_limit: 
E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index e93f07981e..b1365b34c1 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -18,7 +18,7 @@ use std::slice::SliceIndex; /// ## Example /// /// ``` -/// use ssz_types::{RuntimeVariableList}; +/// use types::{RuntimeVariableList}; /// /// let base: Vec = vec![1, 2, 3, 4]; /// diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index b52adcfe41..bb5e1ea34b 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,6 +1,7 @@ -use crate::beacon_block_body::format_kzg_commitments; +use crate::beacon_block_body::{format_kzg_commitments, BLOB_KZG_COMMITMENTS_INDEX}; use crate::*; use derivative::Derivative; +use merkle_proof::MerkleTree; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::fmt; @@ -239,6 +240,45 @@ impl> SignedBeaconBlock } } + /// Produce a signed beacon block header AND a merkle proof for the KZG commitments. + /// + /// This method is more efficient than generating each part separately as it reuses hashing. 
+ pub fn signed_block_header_and_kzg_commitments_proof( + &self, + ) -> Result< + ( + SignedBeaconBlockHeader, + FixedVector, + ), + Error, + > { + // Create the block body merkle tree + let body_leaves = self.message().body().body_merkle_leaves(); + let beacon_block_body_depth = body_leaves.len().next_power_of_two().ilog2() as usize; + let body_merkle_tree = MerkleTree::create(&body_leaves, beacon_block_body_depth); + + // Compute the KZG commitments inclusion proof + let (_, proof) = body_merkle_tree + .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) + .map_err(Error::MerkleTreeError)?; + let kzg_commitments_inclusion_proof = FixedVector::new(proof)?; + + let block_header = BeaconBlockHeader { + slot: self.slot(), + proposer_index: self.message().proposer_index(), + parent_root: self.parent_root(), + state_root: self.state_root(), + body_root: body_merkle_tree.hash(), + }; + + let signed_header = SignedBeaconBlockHeader { + message: block_header, + signature: self.signature().clone(), + }; + + Ok((signed_header, kzg_commitments_inclusion_proof)) + } + /// Convenience accessor for the block's slot. pub fn slot(&self) -> Slot { self.message().slot() diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 8c8f2d073d..0391756047 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -133,7 +133,7 @@ pub struct SlotIter<'a> { slots_per_epoch: u64, } -impl<'a> Iterator for SlotIter<'a> { +impl Iterator for SlotIter<'_> { type Item = Slot; fn next(&mut self) -> Option { diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 9bfe6fb261..187b070d29 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,14 +1,17 @@ //! Identifies each shard by an integer identifier. 
-use crate::{AttestationRef, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; +use crate::{AttestationRef, ChainSpec, CommitteeIndex, EthSpec, Slot}; use alloy_primitives::{bytes::Buf, U256}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; use std::sync::LazyLock; -use swap_or_not_shuffle::compute_shuffled_index; const MAX_SUBNET_ID: usize = 64; +/// The number of bits in a Discovery `NodeId`. This is used for binary operations on the node-id +/// data. +const NODE_ID_BITS: u64 = 256; + static SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { let mut v = Vec::with_capacity(MAX_SUBNET_ID); @@ -74,52 +77,22 @@ impl SubnetId { .into()) } - /// Computes the set of subnets the node should be subscribed to during the current epoch, - /// along with the first epoch in which these subscriptions are no longer valid. + /// Computes the set of subnets the node should be subscribed to. We subscribe to these subnets + /// for the duration of the node's runtime. #[allow(clippy::arithmetic_side_effects)] - pub fn compute_subnets_for_epoch( + pub fn compute_attestation_subnets( raw_node_id: [u8; 32], - epoch: Epoch, spec: &ChainSpec, - ) -> Result<(impl Iterator, Epoch), &'static str> { - // simplify variable naming - let subscription_duration = spec.epochs_per_subnet_subscription; + ) -> impl Iterator { + // The bits of the node-id we are using to define the subnets. 
let prefix_bits = spec.attestation_subnet_prefix_bits as u64; - let shuffling_prefix_bits = spec.attestation_subnet_shuffling_prefix_bits as u64; - let node_id = U256::from_be_slice(&raw_node_id); + let node_id = U256::from_be_slice(&raw_node_id); // calculate the prefixes used to compute the subnet and shuffling - let node_id_prefix = (node_id >> (256 - prefix_bits)).as_le_slice().get_u64_le(); - let shuffling_prefix = (node_id >> (256 - (prefix_bits + shuffling_prefix_bits))) + let node_id_prefix = (node_id >> (NODE_ID_BITS - prefix_bits)) .as_le_slice() .get_u64_le(); - // number of groups the shuffling creates - let shuffling_groups = 1 << shuffling_prefix_bits; - // shuffling group for this node - let shuffling_bits = shuffling_prefix % shuffling_groups; - let epoch_transition = (node_id_prefix - + (shuffling_bits * (subscription_duration >> shuffling_prefix_bits))) - % subscription_duration; - - // Calculate at which epoch this node needs to re-evaluate - let valid_until_epoch = epoch.as_u64() - + subscription_duration - .saturating_sub((epoch.as_u64() + epoch_transition) % subscription_duration); - - let subscription_event_idx = (epoch.as_u64() + epoch_transition) / subscription_duration; - let permutation_seed = - ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); - - let num_subnets = 1 << spec.attestation_subnet_prefix_bits; - let permutated_prefix = compute_shuffled_index( - node_id_prefix as usize, - num_subnets, - &permutation_seed, - spec.shuffle_round_count, - ) - .ok_or("Unable to shuffle")? as u64; - // Get the constants we need to avoid holding a reference to the spec let &ChainSpec { subnets_per_node, @@ -127,10 +100,8 @@ impl SubnetId { .. 
} = spec; - let subnet_set_generator = (0..subnets_per_node).map(move |idx| { - SubnetId::new((permutated_prefix + idx as u64) % attestation_subnet_count) - }); - Ok((subnet_set_generator, valid_until_epoch.into())) + (0..subnets_per_node) + .map(move |idx| SubnetId::new((node_id_prefix + idx as u64) % attestation_subnet_count)) } } @@ -180,7 +151,7 @@ mod tests { /// A set of tests compared to the python specification #[test] - fn compute_subnets_for_epoch_unit_test() { + fn compute_attestation_subnets_test() { // Randomized variables used generated with the python specification let node_ids = [ "0", @@ -189,59 +160,34 @@ mod tests { "27726842142488109545414954493849224833670205008410190955613662332153332462900", "39755236029158558527862903296867805548949739810920318269566095185775868999998", "31899136003441886988955119620035330314647133604576220223892254902004850516297", - "58579998103852084482416614330746509727562027284701078483890722833654510444626", - "28248042035542126088870192155378394518950310811868093527036637864276176517397", - "60930578857433095740782970114409273483106482059893286066493409689627770333527", - "103822458477361691467064888613019442068586830412598673713899771287914656699997", ] .map(|v| Uint256::from_str_radix(v, 10).unwrap().to_be_bytes::<32>()); - let epochs = [ - 54321u64, 1017090249, 1827566880, 846255942, 766597383, 1204990115, 1616209495, - 1774367616, 1484598751, 3525502229, - ] - .map(Epoch::from); + let expected_subnets = [ + vec![0, 1], + vec![49u64, 50u64], + vec![10, 11], + vec![15, 16], + vec![21, 22], + vec![17, 18], + ]; // Test mainnet let spec = ChainSpec::mainnet(); - // Calculated by hand - let expected_valid_time = [ - 54528u64, 1017090255, 1827567030, 846256049, 766597387, 1204990287, 1616209536, - 1774367857, 1484598847, 3525502311, - ]; - - // Calculated from pyspec - let expected_subnets = [ - vec![4u64, 5u64], - vec![31, 32], - vec![39, 40], - vec![38, 39], - vec![53, 54], - vec![57, 58], - vec![48, 49], - vec![1, 
2], - vec![34, 35], - vec![37, 38], - ]; - for x in 0..node_ids.len() { println!("Test: {}", x); println!( - "NodeId: {:?}\n Epoch: {}\n, expected_update_time: {}\n, expected_subnets: {:?}", - node_ids[x], epochs[x], expected_valid_time[x], expected_subnets[x] + "NodeId: {:?}\nExpected_subnets: {:?}", + node_ids[x], expected_subnets[x] ); - let (computed_subnets, valid_time) = SubnetId::compute_subnets_for_epoch::< - crate::MainnetEthSpec, - >(node_ids[x], epochs[x], &spec) - .unwrap(); + let computed_subnets = SubnetId::compute_attestation_subnets(node_ids[x], &spec); assert_eq!( expected_subnets[x], computed_subnets.map(SubnetId::into).collect::>() ); - assert_eq!(Epoch::from(expected_valid_time[x]), valid_time); } } } diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs index ab7ded0409..cf240c3f1f 100644 --- a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -83,6 +83,35 @@ mod test { } } + #[test] + fn test_verify_blob_inclusion_proof_from_existing_proof() { + let (block, mut blob_sidecars) = + generate_rand_block_and_blobs::(ForkName::Deneb, 1, &mut thread_rng()); + let BlobSidecar { + index, + blob, + kzg_proof, + .. + } = blob_sidecars.pop().unwrap(); + + // Compute the commitments inclusion proof and use it for building blob sidecar. 
+ let (signed_block_header, kzg_commitments_inclusion_proof) = block + .signed_block_header_and_kzg_commitments_proof() + .unwrap(); + + let blob_sidecar = BlobSidecar::new_with_existing_proof( + index as usize, + blob, + &block, + signed_block_header, + &kzg_commitments_inclusion_proof, + kzg_proof, + ) + .unwrap(); + + assert!(blob_sidecar.verify_blob_sidecar_inclusion_proof()); + } + #[test] fn test_verify_blob_inclusion_proof_invalid() { let (_block, blobs) = diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 8cf118eea5..222b9292a2 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, DepositData, Epoch, - EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, + FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -15,6 +15,7 @@ use tree_hash_derive::TreeHash; Debug, Clone, PartialEq, + Eq, Serialize, Deserialize, Encode, @@ -37,14 +38,15 @@ pub struct Validator { impl Validator { #[allow(clippy::arithmetic_side_effects)] pub fn from_deposit( - deposit_data: &DepositData, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, amount: u64, fork_name: ForkName, spec: &ChainSpec, ) -> Self { let mut validator = Validator { - pubkey: deposit_data.pubkey, - withdrawal_credentials: deposit_data.withdrawal_credentials, + pubkey, + withdrawal_credentials, activation_eligibility_epoch: spec.far_future_epoch, activation_epoch: spec.far_future_epoch, exit_epoch: spec.far_future_epoch, @@ -290,16 +292,6 @@ impl Validator { spec.max_effective_balance } } - - pub fn get_active_balance( - &self, - validator_balance: u64, - spec: &ChainSpec, - current_fork: ForkName, - ) -> u64 { - let max_effective_balance = 
self.get_max_effective_balance(spec, current_fork); - std::cmp::min(validator_balance, max_effective_balance) - } } impl Default for Validator { diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index b65b51230c..d02e01b80c 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -6,18 +6,18 @@ edition = { workspace = true } [dependencies] alloy-primitives = { workspace = true } -ethereum_ssz = { workspace = true } -tree_hash = { workspace = true } -rand = { workspace = true } -serde = { workspace = true } -ethereum_serde_utils = { workspace = true } -hex = { workspace = true } -ethereum_hashing = { workspace = true } arbitrary = { workspace = true } -zeroize = { workspace = true } blst = { version = "0.3.3", optional = true } -safe_arith = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } fixed_bytes = { workspace = true } +hex = { workspace = true } +rand = { workspace = true } +safe_arith = { workspace = true } +serde = { workspace = true } +tree_hash = { workspace = true } +zeroize = { workspace = true } [features] arbitrary = [] diff --git a/crypto/bls/src/macros.rs b/crypto/bls/src/macros.rs index f3a7374ba7..58b1ec7d6c 100644 --- a/crypto/bls/src/macros.rs +++ b/crypto/bls/src/macros.rs @@ -20,7 +20,7 @@ macro_rules! 
impl_tree_hash { // but benchmarks have show that to be at least 15% slower because of the // unnecessary copying and allocation (one Vec per byte) let values_per_chunk = tree_hash::BYTES_PER_CHUNK; - let minimum_chunk_count = ($byte_size + values_per_chunk - 1) / values_per_chunk; + let minimum_chunk_count = $byte_size.div_ceil(values_per_chunk); tree_hash::merkle_root(&self.serialize(), minimum_chunk_count) } }; diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml index a0237ba7ed..a893a9360d 100644 --- a/crypto/eth2_key_derivation/Cargo.toml +++ b/crypto/eth2_key_derivation/Cargo.toml @@ -3,15 +3,14 @@ name = "eth2_key_derivation" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sha2 = { workspace = true } -zeroize = { workspace = true } +bls = { workspace = true } num-bigint-dig = { version = "0.8.4", features = ["zeroize"] } ring = { workspace = true } -bls = { workspace = true } +sha2 = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] hex = { workspace = true } diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index bb6222807b..61d2722efb 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -3,25 +3,24 @@ name = "eth2_keystore" version = "0.1.0" authors = ["Pawan Dhananjay "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = { workspace = true } +aes = { version = "0.7", features = ["ctr"] } +bls = { workspace = true } +eth2_key_derivation = { workspace = true } +hex = { workspace = true } hmac = "0.11.0" pbkdf2 = { version = "0.8.0", default-features = false } +rand = { workspace = true } scrypt = { version = "0.7.0", default-features = false } +serde = { workspace = true } +serde_json = { 
workspace = true } +serde_repr = { workspace = true } sha2 = { workspace = true } +unicode-normalization = "0.1.16" uuid = { workspace = true } zeroize = { workspace = true } -serde = { workspace = true } -serde_repr = { workspace = true } -hex = { workspace = true } -bls = { workspace = true } -serde_json = { workspace = true } -eth2_key_derivation = { workspace = true } -unicode-normalization = "0.1.16" -aes = { version = "0.7", features = ["ctr"] } [dev-dependencies] tempfile = { workspace = true } diff --git a/crypto/eth2_keystore/src/keystore.rs b/crypto/eth2_keystore/src/keystore.rs index 304ea3ecd6..16a979cf63 100644 --- a/crypto/eth2_keystore/src/keystore.rs +++ b/crypto/eth2_keystore/src/keystore.rs @@ -26,7 +26,7 @@ use std::io::{Read, Write}; use std::path::Path; use std::str; use unicode_normalization::UnicodeNormalization; -use zeroize::Zeroize; +use zeroize::Zeroizing; /// The byte-length of a BLS secret key. const SECRET_KEY_LEN: usize = 32; @@ -60,45 +60,6 @@ pub const HASH_SIZE: usize = 32; /// The default iteraction count, `c`, for PBKDF2. pub const DEFAULT_PBKDF2_C: u32 = 262_144; -/// Provides a new-type wrapper around `String` that is zeroized on `Drop`. -/// -/// Useful for ensuring that password memory is zeroed-out on drop. 
-#[derive(Clone, PartialEq, Serialize, Deserialize, Zeroize)] -#[zeroize(drop)] -#[serde(transparent)] -struct ZeroizeString(String); - -impl From for ZeroizeString { - fn from(s: String) -> Self { - Self(s) - } -} - -impl AsRef<[u8]> for ZeroizeString { - fn as_ref(&self) -> &[u8] { - self.0.as_bytes() - } -} - -impl std::ops::Deref for ZeroizeString { - type Target = String; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl std::ops::DerefMut for ZeroizeString { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl FromIterator for ZeroizeString { - fn from_iter>(iter: T) -> Self { - ZeroizeString(String::from_iter(iter)) - } -} - #[derive(Debug, PartialEq)] pub enum Error { InvalidSecretKeyLen { len: usize, expected: usize }, @@ -451,11 +412,12 @@ fn is_control_character(c: char) -> bool { /// Takes a slice of bytes and returns a NFKD normalized string representation. /// /// Returns an error if the bytes are not valid utf8. -fn normalize(bytes: &[u8]) -> Result { +fn normalize(bytes: &[u8]) -> Result, Error> { Ok(str::from_utf8(bytes) .map_err(|_| Error::InvalidPasswordBytes)? 
.nfkd() - .collect::()) + .collect::() + .into()) } /// Generates a checksum to indicate that the `derived_key` is associated with the diff --git a/crypto/eth2_keystore/tests/eip2335_vectors.rs b/crypto/eth2_keystore/tests/eip2335_vectors.rs index 3702a21816..e6852cc608 100644 --- a/crypto/eth2_keystore/tests/eip2335_vectors.rs +++ b/crypto/eth2_keystore/tests/eip2335_vectors.rs @@ -58,7 +58,7 @@ fn eip2335_test_vector_scrypt() { } "#; - let keystore = decode_and_check_sk(&vector); + let keystore = decode_and_check_sk(vector); assert_eq!( *keystore.uuid(), Uuid::parse_str("1d85ae20-35c5-4611-98e8-aa14a633906f").unwrap(), @@ -102,7 +102,7 @@ fn eip2335_test_vector_pbkdf() { } "#; - let keystore = decode_and_check_sk(&vector); + let keystore = decode_and_check_sk(vector); assert_eq!( *keystore.uuid(), Uuid::parse_str("64625def-3331-4eea-ab6f-782f3ed16a83").unwrap(), diff --git a/crypto/eth2_keystore/tests/tests.rs b/crypto/eth2_keystore/tests/tests.rs index 0df884b8a2..20bf9f1653 100644 --- a/crypto/eth2_keystore/tests/tests.rs +++ b/crypto/eth2_keystore/tests/tests.rs @@ -54,25 +54,17 @@ fn file() { let dir = tempdir().unwrap(); let path = dir.path().join("keystore.json"); - let get_file = || { - File::options() - .write(true) - .read(true) - .create(true) - .open(path.clone()) - .expect("should create file") - }; - let keystore = KeystoreBuilder::new(&keypair, GOOD_PASSWORD, "".into()) .unwrap() .build() .unwrap(); keystore - .to_json_writer(&mut get_file()) + .to_json_writer(File::create_new(&path).unwrap()) .expect("should write to file"); - let decoded = Keystore::from_json_reader(&mut get_file()).expect("should read from file"); + let decoded = + Keystore::from_json_reader(File::open(&path).unwrap()).expect("should read from file"); assert_eq!( decoded.decrypt_keypair(BAD_PASSWORD).err().unwrap(), diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml index f3af6aab59..5327bdc163 100644 --- a/crypto/eth2_wallet/Cargo.toml +++ 
b/crypto/eth2_wallet/Cargo.toml @@ -3,18 +3,17 @@ name = "eth2_wallet" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +eth2_key_derivation = { workspace = true } +eth2_keystore = { workspace = true } +rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } -uuid = { workspace = true } -rand = { workspace = true } -eth2_keystore = { workspace = true } -eth2_key_derivation = { workspace = true } tiny-bip39 = "1" +uuid = { workspace = true } [dev-dependencies] hex = { workspace = true } diff --git a/crypto/eth2_wallet/tests/tests.rs b/crypto/eth2_wallet/tests/tests.rs index fe4565e0db..3dc073f764 100644 --- a/crypto/eth2_wallet/tests/tests.rs +++ b/crypto/eth2_wallet/tests/tests.rs @@ -132,20 +132,11 @@ fn file_round_trip() { let dir = tempdir().unwrap(); let path = dir.path().join("keystore.json"); - let get_file = || { - File::options() - .write(true) - .read(true) - .create(true) - .open(path.clone()) - .expect("should create file") - }; - wallet - .to_json_writer(&mut get_file()) + .to_json_writer(File::create_new(&path).unwrap()) .expect("should write to file"); - let decoded = Wallet::from_json_reader(&mut get_file()).unwrap(); + let decoded = Wallet::from_json_reader(File::open(&path).unwrap()).unwrap(); assert_eq!( decoded.decrypt_seed(&[1, 2, 3]).err().unwrap(), diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index ce55f83639..bfe0f19cd0 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -3,22 +3,21 @@ name = "kzg" version = "0.1.0" authors = ["Pawan Dhananjay "] edition = "2021" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] arbitrary = { workspace = true } +c-kzg = { workspace = true } +derivative = { workspace = true } +ethereum_hashing = { workspace = true 
} +ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -derivative = { workspace = true } -serde = { workspace = true } -ethereum_serde_utils = { workspace = true } hex = { workspace = true } -ethereum_hashing = { workspace = true } -c-kzg = { workspace = true } rust_eth_kzg = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } +tree_hash = { workspace = true } [dev-dependencies] criterion = { workspace = true } diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs index f788be265a..7aaa1d9919 100644 --- a/crypto/kzg/src/trusted_setup.rs +++ b/crypto/kzg/src/trusted_setup.rs @@ -99,7 +99,7 @@ impl<'de> Deserialize<'de> for G1Point { { struct G1PointVisitor; - impl<'de> Visitor<'de> for G1PointVisitor { + impl Visitor<'_> for G1PointVisitor { type Value = G1Point; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { formatter.write_str("A 48 byte hex encoded string") @@ -135,7 +135,7 @@ impl<'de> Deserialize<'de> for G2Point { { struct G2PointVisitor; - impl<'de> Visitor<'de> for G2PointVisitor { + impl Visitor<'_> for G2PointVisitor { type Value = G2Point; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { formatter.write_str("A 96 byte hex encoded string") diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index 96176f3fba..a7a54b1416 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -10,8 +10,8 @@ clap = { workspace = true } clap_utils = { workspace = true } environment = { workspace = true } hex = { workspace = true } -store = { workspace = true } -types = { workspace = true } -slog = { workspace = true } -strum = { workspace = true } serde = { workspace = true } +slog = { workspace = true } +store = { workspace = true } +strum = { workspace = true } +types = { workspace = true } diff --git 
a/database_manager/src/cli.rs b/database_manager/src/cli.rs index 5521b97805..4246a51f89 100644 --- a/database_manager/src/cli.rs +++ b/database_manager/src/cli.rs @@ -3,6 +3,7 @@ use clap_utils::get_color_style; use clap_utils::FLAG_HEADER; use serde::{Deserialize, Serialize}; use std::path::PathBuf; +use store::hdiff::HierarchyConfig; use crate::InspectTarget; @@ -21,13 +22,14 @@ use crate::InspectTarget; pub struct DatabaseManager { #[clap( long, - value_name = "SLOT_COUNT", - help = "Specifies how often a freezer DB restore point should be stored. \ - Cannot be changed after initialization. \ - [default: 2048 (mainnet) or 64 (minimal)]", + global = true, + value_name = "N0,N1,N2,...", + help = "Specifies the frequency for storing full state snapshots and hierarchical \ + diffs in the freezer DB.", + default_value_t = HierarchyConfig::default(), display_order = 0 )] - pub slots_per_restore_point: Option, + pub hierarchy_exponents: HierarchyConfig, #[clap( long, diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 3d55631848..fc15e98616 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -6,7 +6,7 @@ use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, schema_change::migrate_schema, slot_clock::SystemTimeSlotClock, }; -use beacon_node::{get_data_dir, get_slots_per_restore_point, ClientConfig}; +use beacon_node::{get_data_dir, ClientConfig}; use clap::ArgMatches; use clap::ValueEnum; use cli::{Compact, Inspect}; @@ -16,7 +16,6 @@ use slog::{info, warn, Logger}; use std::fs; use std::io::Write; use std::path::PathBuf; -use store::metadata::STATE_UPPER_LIMIT_NO_RETAIN; use store::{ errors::Error, metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}, @@ -39,13 +38,8 @@ fn parse_client_config( client_config .blobs_db_path .clone_from(&database_manager_config.blobs_dir); - - let (sprp, sprp_explicit) = - get_slots_per_restore_point::(database_manager_config.slots_per_restore_point)?; - - 
client_config.store.slots_per_restore_point = sprp; - client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; client_config.store.blob_prune_margin_epochs = database_manager_config.blob_prune_margin_epochs; + client_config.store.hierarchy_config = database_manager_config.hierarchy_exponents.clone(); Ok(client_config) } @@ -298,6 +292,7 @@ fn parse_migrate_config(migrate_config: &Migrate) -> Result( migrate_config: MigrateConfig, client_config: ClientConfig, + mut genesis_state: BeaconState, runtime_context: &RuntimeContext, log: Logger, ) -> Result<(), Error> { @@ -328,13 +323,13 @@ pub fn migrate_db( "to" => to.as_u64(), ); + let genesis_state_root = genesis_state.canonical_root()?; migrate_schema::, _, _, _>>( db, - client_config.eth1.deposit_contract_deploy_block, + Some(genesis_state_root), from, to, log, - &spec, ) } @@ -426,8 +421,7 @@ pub fn prune_states( // correct network, and that we don't end up storing the wrong genesis state. let genesis_from_db = db .load_cold_state_by_slot(Slot::new(0)) - .map_err(|e| format!("Error reading genesis state: {e:?}"))? - .ok_or("Error: genesis state missing from database. Check schema version.")?; + .map_err(|e| format!("Error reading genesis state: {e:?}"))?; if genesis_from_db.genesis_validators_root() != genesis_state.genesis_validators_root() { return Err(format!( @@ -438,18 +432,12 @@ pub fn prune_states( // Check that the user has confirmed they want to proceed. 
if !prune_config.confirm { - match db.get_anchor_info() { - Some(anchor_info) - if anchor_info.state_lower_limit == 0 - && anchor_info.state_upper_limit == STATE_UPPER_LIMIT_NO_RETAIN => - { - info!(log, "States have already been pruned"); - return Ok(()); - } - _ => { - info!(log, "Ready to prune states"); - } + if db.get_anchor_info().full_state_pruning_enabled() { + info!(log, "States have already been pruned"); + return Ok(()); } + + info!(log, "Ready to prune states"); warn!( log, "Pruning states is irreversible"; @@ -484,10 +472,33 @@ pub fn run( let log = context.log().clone(); let format_err = |e| format!("Fatal error: {:?}", e); + let get_genesis_state = || { + let executor = env.core_context().executor; + let network_config = context + .eth2_network_config + .clone() + .ok_or("Missing network config")?; + + executor + .block_on_dangerous( + network_config.genesis_state::( + client_config.genesis_state_url.as_deref(), + client_config.genesis_state_url_timeout, + &log, + ), + "get_genesis_state", + ) + .ok_or("Shutting down")? + .map_err(|e| format!("Error getting genesis state: {e}"))? 
+ .ok_or("Genesis state missing".to_string()) + }; + match &db_manager_config.subcommand { cli::DatabaseManagerSubcommand::Migrate(migrate_config) => { let migrate_config = parse_migrate_config(migrate_config)?; - migrate_db(migrate_config, client_config, &context, log).map_err(format_err) + let genesis_state = get_genesis_state()?; + migrate_db(migrate_config, client_config, genesis_state, &context, log) + .map_err(format_err) } cli::DatabaseManagerSubcommand::Inspect(inspect_config) => { let inspect_config = parse_inspect_config(inspect_config)?; @@ -503,27 +514,8 @@ pub fn run( prune_blobs(client_config, &context, log).map_err(format_err) } cli::DatabaseManagerSubcommand::PruneStates(prune_states_config) => { - let executor = env.core_context().executor; - let network_config = context - .eth2_network_config - .clone() - .ok_or("Missing network config")?; - - let genesis_state = executor - .block_on_dangerous( - network_config.genesis_state::( - client_config.genesis_state_url.as_deref(), - client_config.genesis_state_url_timeout, - &log, - ), - "get_genesis_state", - ) - .ok_or("Shutting down")? - .map_err(|e| format!("Error getting genesis state: {e}"))? 
- .ok_or("Genesis state missing")?; - let prune_config = parse_prune_states_config(prune_states_config)?; - + let genesis_state = get_genesis_state()?; prune_states(client_config, prune_config, genesis_state, &context, log) } cli::DatabaseManagerSubcommand::Compact(compact_config) => { diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 77d122efb7..72be77a70b 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "5.3.0" +version = "6.0.1" authors = ["Paul Hauner "] edition = { workspace = true } @@ -11,36 +11,36 @@ fake_crypto = ['bls/fake_crypto'] jemalloc = ["malloc_utils/jemalloc"] [dependencies] +account_utils = { workspace = true } +beacon_chain = { workspace = true } bls = { workspace = true } clap = { workspace = true } -log = { workspace = true } -sloggers = { workspace = true } -serde = { workspace = true } -serde_yaml = { workspace = true } -serde_json = { workspace = true } +clap_utils = { workspace = true } +deposit_contract = { workspace = true } env_logger = { workspace = true } -types = { workspace = true } -state_processing = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +eth2_network_config = { workspace = true } +eth2_wallet = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } -environment = { workspace = true } -eth2_network_config = { workspace = true } -deposit_contract = { workspace = true } -tree_hash = { workspace = true } -clap_utils = { workspace = true } -lighthouse_network = { workspace = true } -validator_dir = { workspace = true } -lighthouse_version = { workspace = true } -account_utils = { workspace = true } -eth2_wallet = { workspace = true } -eth2 = { workspace = true } -snap = { workspace = true } -beacon_chain = { workspace = true } -store = { workspace = true } -malloc_utils = { workspace = true } -rayon = { workspace = true } execution_layer = 
{ workspace = true } hex = { workspace = true } +lighthouse_network = { workspace = true } +lighthouse_version = { workspace = true } +log = { workspace = true } +malloc_utils = { workspace = true } +rayon = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +sloggers = { workspace = true } +snap = { workspace = true } +state_processing = { workspace = true } +store = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +validator_dir = { workspace = true } [package.metadata.cargo-udeps.ignore] normal = ["malloc_utils"] diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 7c37aa6d67..eda9a2ebf2 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "5.3.0" +version = "6.0.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false @@ -34,43 +34,46 @@ malloc_utils = { workspace = true, features = ["jemalloc"] } malloc_utils = { workspace = true } [dependencies] -beacon_node = { workspace = true } -slog = { workspace = true } -types = { workspace = true } -bls = { workspace = true } -ethereum_hashing = { workspace = true } -clap = { workspace = true } -environment = { workspace = true } -boot_node = { path = "../boot_node" } -futures = { workspace = true } -validator_client = { workspace = true } account_manager = { "path" = "../account_manager" } -clap_utils = { workspace = true } -eth2_network_config = { workspace = true } -lighthouse_version = { workspace = true } account_utils = { workspace = true } -lighthouse_metrics = { workspace = true } +beacon_node = { workspace = true } +bls = { workspace = true } +boot_node = { path = "../boot_node" } +clap = { workspace = true } +clap_utils = { workspace = true } +database_manager = { path = "../database_manager" } +directory = { workspace = true } +environment = { workspace = true } +eth2_network_config = { workspace = true } 
+ethereum_hashing = { workspace = true } +futures = { workspace = true } +lighthouse_version = { workspace = true } +logging = { workspace = true } +malloc_utils = { workspace = true } +metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } -task_executor = { workspace = true } -malloc_utils = { workspace = true } -directory = { workspace = true } -unused_port = { workspace = true } -database_manager = { path = "../database_manager" } slasher = { workspace = true } +slog = { workspace = true } +task_executor = { workspace = true } +types = { workspace = true } +unused_port = { workspace = true } +validator_client = { workspace = true } validator_manager = { path = "../validator_manager" } -logging = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } -validator_dir = { workspace = true } -slashing_protection = { workspace = true } -lighthouse_network = { workspace = true } -sensitive_url = { workspace = true } +beacon_node_fallback = { workspace = true } +beacon_processor = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } -beacon_processor = { workspace = true } +initialized_validators = { workspace = true } +lighthouse_network = { workspace = true } +sensitive_url = { workspace = true } +slashing_protection = { workspace = true } +tempfile = { workspace = true } +validator_dir = { workspace = true } +zeroize = { workspace = true } [[test]] name = "lighthouse_tests" diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index f95751392c..02b8e0b655 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -6,19 +6,19 @@ edition = { workspace = true } [dependencies] async-channel = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } -sloggers = { workspace = true } -types = { workspace = true } eth2_config = { workspace = true } -task_executor = { workspace = 
true } eth2_network_config = { workspace = true } -logging = { workspace = true } -slog-term = { workspace = true } -slog-async = { workspace = true } futures = { workspace = true } -slog-json = "2.3.0" +logging = { workspace = true } serde = { workspace = true } +slog = { workspace = true } +slog-async = { workspace = true } +slog-json = "2.3.0" +slog-term = { workspace = true } +sloggers = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index e33e4cb9b8..43c5e1107c 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -81,7 +81,7 @@ fn build_profile_name() -> String { std::env!("OUT_DIR") .split(std::path::MAIN_SEPARATOR) .nth_back(3) - .unwrap_or_else(|| "unknown") + .unwrap_or("unknown") .to_string() } diff --git a/lighthouse/src/metrics.rs b/lighthouse/src/metrics.rs index 0002b43e7b..30e0120582 100644 --- a/lighthouse/src/metrics.rs +++ b/lighthouse/src/metrics.rs @@ -1,5 +1,5 @@ -pub use lighthouse_metrics::*; use lighthouse_version::VERSION; +pub use metrics::*; use slog::{error, Logger}; use std::sync::LazyLock; use std::time::{SystemTime, UNIX_EPOCH}; diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 4d15593714..d53d042fa4 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -15,7 +15,7 @@ use account_manager::{ use account_utils::{ eth2_keystore::KeystoreBuilder, validator_definitions::{SigningDefinition, ValidatorDefinition, ValidatorDefinitions}, - ZeroizeString, STDIN_INPUTS_FLAG, + STDIN_INPUTS_FLAG, }; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::env; @@ -27,6 +27,7 @@ use std::str::from_utf8; use tempfile::{tempdir, TempDir}; use types::{Keypair, PublicKey}; use 
validator_dir::ValidatorDir; +use zeroize::Zeroizing; /// Returns the `lighthouse account` command. fn account_cmd() -> Command { @@ -114,7 +115,7 @@ fn create_wallet>( .arg(base_dir.as_ref().as_os_str()) .arg(CREATE_CMD) .arg(format!("--{}", NAME_FLAG)) - .arg(&name) + .arg(name) .arg(format!("--{}", PASSWORD_FLAG)) .arg(password.as_ref().as_os_str()) .arg(format!("--{}", MNEMONIC_FLAG)) @@ -272,16 +273,16 @@ impl TestValidator { .expect("stdout is not utf8") .to_string(); - if stdout == "" { + if stdout.is_empty() { return Ok(vec![]); } let pubkeys = stdout[..stdout.len() - 1] .split("\n") - .filter_map(|line| { + .map(|line| { let tab = line.find("\t").expect("line must have tab"); let (_, pubkey) = line.split_at(tab + 1); - Some(pubkey.to_string()) + pubkey.to_string() }) .collect::>(); @@ -445,7 +446,9 @@ fn validator_import_launchpad() { } } - stdin.write(format!("{}\n", PASSWORD).as_bytes()).unwrap(); + stdin + .write_all(format!("{}\n", PASSWORD).as_bytes()) + .unwrap(); child.wait().unwrap(); @@ -498,12 +501,12 @@ fn validator_import_launchpad() { signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, - voting_keystore_password: Some(ZeroizeString::from(PASSWORD.to_string())), + voting_keystore_password: Some(Zeroizing::from(PASSWORD.to_string())), }, }; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); @@ -524,7 +527,7 @@ fn validator_import_launchpad() { expected_def.enabled = true; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); } @@ -581,7 +584,7 @@ fn validator_import_launchpad_no_password_then_add_password() { let mut child = validator_import_key_cmd(); wait_for_password_prompt(&mut child); let stdin = child.stdin.as_mut().unwrap(); - stdin.write("\n".as_bytes()).unwrap(); + 
stdin.write_all("\n".as_bytes()).unwrap(); child.wait().unwrap(); assert!( @@ -627,14 +630,16 @@ fn validator_import_launchpad_no_password_then_add_password() { }; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); let mut child = validator_import_key_cmd(); wait_for_password_prompt(&mut child); let stdin = child.stdin.as_mut().unwrap(); - stdin.write(format!("{}\n", PASSWORD).as_bytes()).unwrap(); + stdin + .write_all(format!("{}\n", PASSWORD).as_bytes()) + .unwrap(); child.wait().unwrap(); let expected_def = ValidatorDefinition { @@ -650,13 +655,13 @@ fn validator_import_launchpad_no_password_then_add_password() { signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: dst_keystore_dir.join(KEYSTORE_NAME), voting_keystore_password_path: None, - voting_keystore_password: Some(ZeroizeString::from(PASSWORD.to_string())), + voting_keystore_password: Some(Zeroizing::from(PASSWORD.to_string())), }, }; let defs = ValidatorDefinitions::open(&dst_dir).unwrap(); assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); } @@ -753,12 +758,12 @@ fn validator_import_launchpad_password_file() { signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, - voting_keystore_password: Some(ZeroizeString::from(PASSWORD.to_string())), + voting_keystore_password: Some(Zeroizing::from(PASSWORD.to_string())), }, }; assert!( - defs.as_slice() == &[expected_def], + defs.as_slice() == [expected_def], "validator defs file should be accurate" ); } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index ac7ddcdbd9..88e05dfa12 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -9,7 +9,6 @@ use beacon_node::beacon_chain::graffiti_calculator::GraffitiOrigin; use 
beacon_processor::BeaconProcessorConfig; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; -use lighthouse_version; use std::fs::File; use std::io::{Read, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; @@ -24,7 +23,9 @@ use types::non_zero_usize::new_non_zero_usize; use types::{Address, Checkpoint, Epoch, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; -const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; +const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/"; +const DEFAULT_EXECUTION_JWT_SECRET_KEY: &str = + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; // These dummy ports should ONLY be used for `enr-xxx-port` flags that do not bind. const DUMMY_ENR_TCP_PORT: u16 = 7777; @@ -52,6 +53,18 @@ struct CommandLineTest { } impl CommandLineTest { fn new() -> CommandLineTest { + let mut base_cmd = base_cmd(); + + base_cmd + .arg("--execution-endpoint") + .arg(DEFAULT_EXECUTION_ENDPOINT) + .arg("--execution-jwt-secret-key") + .arg(DEFAULT_EXECUTION_JWT_SECRET_KEY); + CommandLineTest { cmd: base_cmd } + } + + // Required for testing different JWT authentication methods. 
+ fn new_with_no_execution_endpoint() -> CommandLineTest { let base_cmd = base_cmd(); CommandLineTest { cmd: base_cmd } } @@ -104,7 +117,7 @@ fn staking_flag() { assert!(config.sync_eth1_chain); assert_eq!( config.eth1.endpoint.get_endpoint().to_string(), - DEFAULT_ETH1_ENDPOINT + DEFAULT_EXECUTION_ENDPOINT ); }); } @@ -114,7 +127,7 @@ fn allow_insecure_genesis_sync_default() { CommandLineTest::new() .run_with_zero_port_and_no_genesis_sync() .with_config(|config| { - assert_eq!(config.allow_insecure_genesis_sync, false); + assert!(!config.allow_insecure_genesis_sync); }); } @@ -132,7 +145,7 @@ fn allow_insecure_genesis_sync_enabled() { .flag("allow-insecure-genesis-sync", None) .run_with_zero_port_and_no_genesis_sync() .with_config(|config| { - assert_eq!(config.allow_insecure_genesis_sync, true); + assert!(config.allow_insecure_genesis_sync); }); } @@ -253,7 +266,7 @@ fn always_prepare_payload_default() { #[test] fn always_prepare_payload_override() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("always-prepare-payload", None) .flag( "suggested-fee-recipient", @@ -345,11 +358,11 @@ fn default_graffiti() { #[test] fn trusted_peers_flag() { - let peers = vec![PeerId::random(), PeerId::random()]; + let peers = [PeerId::random(), PeerId::random()]; CommandLineTest::new() .flag( "trusted-peers", - Some(format!("{},{}", peers[0].to_string(), peers[1].to_string()).as_str()), + Some(format!("{},{}", peers[0], peers[1]).as_str()), ) .run_with_zero_port() .with_config(|config| { @@ -369,7 +382,7 @@ fn genesis_backfill_flag() { CommandLineTest::new() .flag("genesis-backfill", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); + .with_config(|config| assert!(config.chain.genesis_backfill)); } /// The genesis backfill flag should be enabled if historic states flag is set. 
@@ -378,17 +391,18 @@ fn genesis_backfill_with_historic_flag() { CommandLineTest::new() .flag("reconstruct-historic-states", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); + .with_config(|config| assert!(config.chain.genesis_backfill)); } // Tests for Eth1 flags. +// DEPRECATED but should not crash #[test] fn dummy_eth1_flag() { CommandLineTest::new() .flag("dummy-eth1", None) - .run_with_zero_port() - .with_config(|config| assert!(config.dummy_eth1_backend)); + .run_with_zero_port(); } +// DEPRECATED but should not crash #[test] fn eth1_flag() { CommandLineTest::new() @@ -433,7 +447,7 @@ fn eth1_cache_follow_distance_manual() { // Tests for Bellatrix flags. fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { use sensitive_url::SensitiveUrl; - let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; + let urls = ["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; // we don't support redundancy for execution-endpoints // only the first provided endpoint is parsed. @@ -459,16 +473,16 @@ fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { // this is way better but intersperse is still a nightly feature :/ // let endpoint_arg: String = urls.into_iter().intersperse(",").collect(); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag(flag, Some(&endpoint_arg)) .flag("execution-jwt", Some(&jwts_arg)) .run_with_zero_port() .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.execution_endpoint.is_some(), true); + assert!(config.execution_endpoint.is_some()); assert_eq!( config.execution_endpoint.as_ref().unwrap().clone(), - SensitiveUrl::parse(&urls[0]).unwrap() + SensitiveUrl::parse(urls[0]).unwrap() ); // Only the first secret file should be used. 
assert_eq!( @@ -480,7 +494,7 @@ fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { #[test] fn run_execution_jwt_secret_key_is_persisted() { let jwt_secret_key = "0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33"; - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoint", Some("http://localhost:8551/")) .flag("execution-jwt-secret-key", Some(jwt_secret_key)) .run_with_zero_port() @@ -501,7 +515,7 @@ fn run_execution_jwt_secret_key_is_persisted() { #[test] fn execution_timeout_multiplier_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoint", Some("http://meow.cats")) .flag( "execution-jwt", @@ -528,7 +542,7 @@ fn bellatrix_jwt_secrets_flag() { let mut file = File::create(dir.path().join("jwtsecrets")).expect("Unable to create file"); file.write_all(b"0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33") .expect("Unable to write to file"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoints", Some("http://localhost:8551/")) .flag( "jwt-secrets", @@ -550,7 +564,7 @@ fn bellatrix_jwt_secrets_flag() { #[test] fn bellatrix_fee_recipient_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoint", Some("http://meow.cats")) .flag( "execution-jwt", @@ -580,7 +594,7 @@ fn run_payload_builder_flag_test(flag: &str, builders: &str) { let config = config.execution_layer.as_ref().unwrap(); // Only first provided endpoint is parsed as we don't support // redundancy. 
- assert_eq!(config.builder_url, all_builders.get(0).cloned()); + assert_eq!(config.builder_url, all_builders.first().cloned()); }) } fn run_payload_builder_flag_test_with_config( @@ -591,7 +605,7 @@ fn run_payload_builder_flag_test_with_config( f: F, ) { let dir = TempDir::new().expect("Unable to create temporary directory"); - let mut test = CommandLineTest::new(); + let mut test = CommandLineTest::new_with_no_execution_endpoint(); test.flag("execution-endpoint", Some("http://meow.cats")) .flag( "execution-jwt", @@ -646,7 +660,7 @@ fn builder_fallback_flags() { Some("builder-fallback-disable-checks"), None, |config| { - assert_eq!(config.chain.builder_fallback_disable_checks, true); + assert!(config.chain.builder_fallback_disable_checks); }, ); } @@ -713,7 +727,7 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl let jwt_file = "jwt-file"; let id = "bn-1"; let version = "Lighthouse-v2.1.3"; - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoint", Some(execution_endpoint)) .flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str()) .flag(jwt_id_flag, Some(id)) @@ -800,6 +814,27 @@ fn network_enable_sampling_flag() { .run_with_zero_port() .with_config(|config| assert!(config.chain.enable_sampling)); } +#[test] +fn blob_publication_batches() { + CommandLineTest::new() + .flag("blob-publication-batches", Some("3")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.blob_publication_batches, 3)); +} + +#[test] +fn blob_publication_batch_interval() { + CommandLineTest::new() + .flag("blob-publication-batch-interval", Some("400")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.blob_publication_batch_interval, + Duration::from_millis(400) + ) + }); +} + #[test] fn network_enable_sampling_flag_default() { CommandLineTest::new() @@ -1621,19 +1656,19 @@ fn http_enable_beacon_processor() { CommandLineTest::new() .flag("http", None) 
.run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + .with_config(|config| assert!(config.http_api.enable_beacon_processor)); CommandLineTest::new() .flag("http", None) .flag("http-enable-beacon-processor", Some("true")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + .with_config(|config| assert!(config.http_api.enable_beacon_processor)); CommandLineTest::new() .flag("http", None) .flag("http-enable-beacon-processor", Some("false")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, false)); + .with_config(|config| assert!(!config.http_api.enable_beacon_processor)); } #[test] fn http_tls_flags() { @@ -1784,45 +1819,12 @@ fn validator_monitor_metrics_threshold_custom() { } // Tests for Store flags. +// DEPRECATED but should still be accepted. #[test] fn slots_per_restore_point_flag() { CommandLineTest::new() .flag("slots-per-restore-point", Some("64")) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.slots_per_restore_point, 64)); -} -#[test] -fn slots_per_restore_point_update_prev_default() { - use beacon_node::beacon_chain::store::config::{ - DEFAULT_SLOTS_PER_RESTORE_POINT, PREV_DEFAULT_SLOTS_PER_RESTORE_POINT, - }; - - CommandLineTest::new() - .flag("slots-per-restore-point", Some("2048")) - .run_with_zero_port() - .with_config_and_dir(|config, dir| { - // Check that 2048 is the previous default. - assert_eq!( - config.store.slots_per_restore_point, - PREV_DEFAULT_SLOTS_PER_RESTORE_POINT - ); - - // Restart the BN with the same datadir and the new default SPRP. It should - // allow this. 
- CommandLineTest::new() - .flag("datadir", Some(&dir.path().display().to_string())) - .flag("zero-ports", None) - .run_with_no_datadir() - .with_config(|config| { - // The dumped config will have the new default 8192 value, but the fact that - // the BN started and ran (with the same datadir) means that the override - // was successful. - assert_eq!( - config.store.slots_per_restore_point, - DEFAULT_SLOTS_PER_RESTORE_POINT - ); - }); - }) + .run_with_zero_port(); } #[test] @@ -1870,6 +1872,27 @@ fn historic_state_cache_size_default() { }); } #[test] +fn hdiff_buffer_cache_size_flag() { + CommandLineTest::new() + .flag("hdiff-buffer-cache-size", Some("1")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.store.hdiff_buffer_cache_size.get(), 1); + }); +} +#[test] +fn hdiff_buffer_cache_size_default() { + use beacon_node::beacon_chain::store::config::DEFAULT_HDIFF_BUFFER_CACHE_SIZE; + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.store.hdiff_buffer_cache_size, + DEFAULT_HDIFF_BUFFER_CACHE_SIZE + ); + }); +} +#[test] fn auto_compact_db_flag() { CommandLineTest::new() .flag("auto-compact-db", Some("false")) @@ -2197,7 +2220,7 @@ fn slasher_broadcast_flag_false() { }); } -#[cfg(all(feature = "slasher-lmdb"))] +#[cfg(feature = "slasher-lmdb")] #[test] fn slasher_backend_override_to_default() { // Hard to test this flag because all but one backend is disabled by default and the backend @@ -2405,7 +2428,7 @@ fn logfile_no_restricted_perms_flag() { .flag("logfile-no-restricted-perms", None) .run_with_zero_port() .with_config(|config| { - assert!(config.logger_config.is_restricted == false); + assert!(!config.logger_config.is_restricted); }); } #[test] @@ -2430,26 +2453,26 @@ fn logfile_format_flag() { fn sync_eth1_chain_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); + .with_config(|config| assert!(config.sync_eth1_chain)); } 
#[test] fn sync_eth1_chain_execution_endpoints_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("execution-endpoints", Some("http://localhost:8551/")) .flag( "execution-jwt", dir.path().join("jwt-file").as_os_str().to_str(), ) .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, true)); + .with_config(|config| assert!(config.sync_eth1_chain)); } #[test] fn sync_eth1_chain_disable_deposit_contract_sync_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new() + CommandLineTest::new_with_no_execution_endpoint() .flag("disable-deposit-contract-sync", None) .flag("execution-endpoints", Some("http://localhost:8551/")) .flag( @@ -2457,7 +2480,22 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { dir.path().join("jwt-file").as_os_str().to_str(), ) .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); + .with_config(|config| assert!(!config.sync_eth1_chain)); +} + +#[test] +#[should_panic] +fn disable_deposit_contract_sync_conflicts_with_staking() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new_with_no_execution_endpoint() + .flag("disable-deposit-contract-sync", None) + .flag("staking", None) + .flag("execution-endpoints", Some("http://localhost:8551/")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .run_with_zero_port(); } #[test] @@ -2465,9 +2503,9 @@ fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enable_light_client_server, false); - assert_eq!(config.chain.enable_light_client_server, false); - assert_eq!(config.http_api.enable_light_client_server, false); + assert!(!config.network.enable_light_client_server); + 
assert!(!config.chain.enable_light_client_server); + assert!(!config.http_api.enable_light_client_server); }); } @@ -2477,8 +2515,8 @@ fn light_client_server_enabled() { .flag("light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enable_light_client_server, true); - assert_eq!(config.chain.enable_light_client_server, true); + assert!(config.network.enable_light_client_server); + assert!(config.chain.enable_light_client_server); }); } @@ -2489,7 +2527,7 @@ fn light_client_http_server_enabled() { .flag("light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.http_api.enable_light_client_server, true); + assert!(config.http_api.enable_light_client_server); }); } diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 659dea468d..b243cd6001 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -149,7 +149,7 @@ fn disable_packet_filter_flag() { .flag("disable-packet-filter", None) .run_with_ip() .with_config(|config| { - assert_eq!(config.disable_packet_filter, true); + assert!(config.disable_packet_filter); }); } @@ -159,7 +159,7 @@ fn enable_enr_auto_update_flag() { .flag("enable-enr-auto-update", None) .run_with_ip() .with_config(|config| { - assert_eq!(config.enable_enr_auto_update, true); + assert!(config.enable_enr_auto_update); }); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 147a371f0e..c5b303e4d1 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -1,9 +1,8 @@ -use validator_client::{ - config::DEFAULT_WEB3SIGNER_KEEP_ALIVE, ApiTopic, BeaconNodeSyncDistanceTiers, Config, -}; +use beacon_node_fallback::{beacon_node_health::BeaconNodeSyncDistanceTiers, ApiTopic}; use crate::exec::CommandLineTestExec; use bls::{Keypair, PublicKeyBytes}; +use initialized_validators::DEFAULT_WEB3SIGNER_KEEP_ALIVE; use sensitive_url::SensitiveUrl; use 
std::fs::File; use std::io::Write; @@ -15,6 +14,7 @@ use std::string::ToString; use std::time::Duration; use tempfile::TempDir; use types::{Address, Slot}; +use validator_client::Config; /// Returns the `lighthouse validator_client` command. fn base_cmd() -> Command { @@ -136,7 +136,7 @@ fn beacon_nodes_tls_certs_flag() { .flag( "beacon-nodes-tls-certs", Some( - vec![ + [ dir.path().join("certificate.crt").to_str().unwrap(), dir.path().join("certificate2.crt").to_str().unwrap(), ] @@ -205,7 +205,7 @@ fn graffiti_file_with_pk_flag() { let mut file = File::create(dir.path().join("graffiti.txt")).expect("Unable to create file"); let new_key = Keypair::random(); let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = format!("{}:nice-graffiti", pubkeybytes.to_string()); + let contents = format!("{}:nice-graffiti", pubkeybytes); file.write_all(contents.as_bytes()) .expect("Unable to write to file"); CommandLineTest::new() @@ -240,7 +240,7 @@ fn fee_recipient_flag() { .run() .with_config(|config| { assert_eq!( - config.fee_recipient, + config.validator_store.fee_recipient, Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap()) ) }); @@ -344,6 +344,21 @@ fn http_store_keystore_passwords_in_secrets_dir_present() { .with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir)); } +#[test] +fn http_token_path_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("http", None) + .flag("http-token-path", dir.path().join("api-token.txt").to_str()) + .run() + .with_config(|config| { + assert_eq!( + config.http_api.http_token_path, + dir.path().join("api-token.txt") + ); + }); +} + // Tests for Metrics flags. 
#[test] fn metrics_flag() { @@ -404,13 +419,13 @@ pub fn malloc_tuning_flag() { CommandLineTest::new() .flag("disable-malloc-tuning", None) .run() - .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, false)); + .with_config(|config| assert!(!config.http_metrics.allocator_metrics_enabled)); } #[test] pub fn malloc_tuning_default() { CommandLineTest::new() .run() - .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, true)); + .with_config(|config| assert!(config.http_metrics.allocator_metrics_enabled)); } #[test] fn doppelganger_protection_flag() { @@ -430,7 +445,7 @@ fn no_doppelganger_protection_flag() { fn no_gas_limit_flag() { CommandLineTest::new() .run() - .with_config(|config| assert!(config.gas_limit.is_none())); + .with_config(|config| assert!(config.validator_store.gas_limit.is_none())); } #[test] fn gas_limit_flag() { @@ -438,46 +453,46 @@ fn gas_limit_flag() { .flag("gas-limit", Some("600")) .flag("builder-proposals", None) .run() - .with_config(|config| assert_eq!(config.gas_limit, Some(600))); + .with_config(|config| assert_eq!(config.validator_store.gas_limit, Some(600))); } #[test] fn no_builder_proposals_flag() { CommandLineTest::new() .run() - .with_config(|config| assert!(!config.builder_proposals)); + .with_config(|config| assert!(!config.validator_store.builder_proposals)); } #[test] fn builder_proposals_flag() { CommandLineTest::new() .flag("builder-proposals", None) .run() - .with_config(|config| assert!(config.builder_proposals)); + .with_config(|config| assert!(config.validator_store.builder_proposals)); } #[test] fn builder_boost_factor_flag() { CommandLineTest::new() .flag("builder-boost-factor", Some("150")) .run() - .with_config(|config| assert_eq!(config.builder_boost_factor, Some(150))); + .with_config(|config| assert_eq!(config.validator_store.builder_boost_factor, Some(150))); } #[test] fn no_builder_boost_factor_flag() { CommandLineTest::new() .run() - .with_config(|config| 
assert_eq!(config.builder_boost_factor, None)); + .with_config(|config| assert_eq!(config.validator_store.builder_boost_factor, None)); } #[test] fn prefer_builder_proposals_flag() { CommandLineTest::new() .flag("prefer-builder-proposals", None) .run() - .with_config(|config| assert!(config.prefer_builder_proposals)); + .with_config(|config| assert!(config.validator_store.prefer_builder_proposals)); } #[test] fn no_prefer_builder_proposals_flag() { CommandLineTest::new() .run() - .with_config(|config| assert!(!config.prefer_builder_proposals)); + .with_config(|config| assert!(!config.validator_store.prefer_builder_proposals)); } #[test] fn no_builder_registration_timestamp_override_flag() { @@ -624,7 +639,7 @@ fn validator_registration_batch_size_zero_value() { #[test] fn validator_disable_web3_signer_slashing_protection_default() { CommandLineTest::new().run().with_config(|config| { - assert!(config.enable_web3signer_slashing_protection); + assert!(config.validator_store.enable_web3signer_slashing_protection); }); } @@ -634,7 +649,7 @@ fn validator_disable_web3_signer_slashing_protection() { .flag("disable-slashing-protection-web3signer", None) .run() .with_config(|config| { - assert!(!config.enable_web3signer_slashing_protection); + assert!(!config.validator_store.enable_web3signer_slashing_protection); }); } @@ -642,7 +657,7 @@ fn validator_disable_web3_signer_slashing_protection() { fn validator_web3_signer_keep_alive_default() { CommandLineTest::new().run().with_config(|config| { assert_eq!( - config.web3_signer_keep_alive_timeout, + config.initialized_validators.web3_signer_keep_alive_timeout, DEFAULT_WEB3SIGNER_KEEP_ALIVE ); }); @@ -655,7 +670,7 @@ fn validator_web3_signer_keep_alive_override() { .run() .with_config(|config| { assert_eq!( - config.web3_signer_keep_alive_timeout, + config.initialized_validators.web3_signer_keep_alive_timeout, Some(Duration::from_secs(1)) ); }); diff --git a/lighthouse/tests/validator_manager.rs 
b/lighthouse/tests/validator_manager.rs index bca6a18ab5..04e3eafe6e 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -9,7 +9,9 @@ use tempfile::{tempdir, TempDir}; use types::*; use validator_manager::{ create_validators::CreateConfig, + delete_validators::DeleteConfig, import_validators::ImportConfig, + list_validators::ListConfig, move_validators::{MoveConfig, PasswordSource, Validators}, }; @@ -105,6 +107,18 @@ impl CommandLineTest { } } +impl CommandLineTest { + fn validators_list() -> Self { + Self::default().flag("list", None) + } +} + +impl CommandLineTest { + fn validators_delete() -> Self { + Self::default().flag("delete", None) + } +} + #[test] pub fn validator_create_without_output_path() { CommandLineTest::validators_create().assert_failed(); @@ -122,7 +136,7 @@ pub fn validator_create_defaults() { count: 1, deposit_gwei: MainnetEthSpec::default_spec().max_effective_balance, mnemonic_path: None, - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), disable_deposits: false, specify_voting_keystore_password: false, eth1_withdrawal_address: None, @@ -187,7 +201,7 @@ pub fn validator_create_disable_deposits() { .flag("--disable-deposits", None) .flag("--builder-proposals", Some("false")) .assert_success(|config| { - assert_eq!(config.disable_deposits, true); + assert!(config.disable_deposits); assert_eq!(config.builder_proposals, Some(false)); }); } @@ -199,10 +213,18 @@ pub fn validator_import_defaults() { .flag("--vc-token", Some("./token.json")) .assert_success(|config| { let expected = ImportConfig { - validators_file_path: PathBuf::from("./vals.json"), + validators_file_path: Some(PathBuf::from("./vals.json")), + keystore_file_path: None, vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), vc_token_path: PathBuf::from("./token.json"), ignore_duplicates: false, + password: None, + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, 
+ enabled: None, + prefer_builder_proposals: None, }; assert_eq!(expected, config); }); @@ -216,10 +238,18 @@ pub fn validator_import_misc_flags() { .flag("--ignore-duplicates", None) .assert_success(|config| { let expected = ImportConfig { - validators_file_path: PathBuf::from("./vals.json"), + validators_file_path: Some(PathBuf::from("./vals.json")), + keystore_file_path: None, vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), vc_token_path: PathBuf::from("./token.json"), ignore_duplicates: true, + password: None, + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, + enabled: None, + prefer_builder_proposals: None, }; assert_eq!(expected, config); }); @@ -233,7 +263,17 @@ pub fn validator_import_missing_token() { } #[test] -pub fn validator_import_missing_validators_file() { +pub fn validator_import_using_both_file_flags() { + CommandLineTest::validators_import() + .flag("--vc-token", Some("./token.json")) + .flag("--validators-file", Some("./vals.json")) + .flag("--keystore-file", Some("./keystore.json")) + .flag("--password", Some("abcd")) + .assert_failed(); +} + +#[test] +pub fn validator_import_missing_both_file_flags() { CommandLineTest::validators_import() .flag("--vc-token", Some("./token.json")) .assert_failed(); @@ -260,7 +300,7 @@ pub fn validator_move_defaults() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -310,7 +350,7 @@ pub fn validator_move_misc_flags_1() { .flag("--src-vc-token", Some("./1.json")) .flag("--dest-vc-url", Some("http://localhost:2")) .flag("--dest-vc-token", Some("./2.json")) - .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--validators", Some(EXAMPLE_PUBKEY_0)) .flag("--builder-proposals", Some("false")) .flag("--prefer-builder-proposals", Some("false")) .assert_success(|config| { @@ -328,7 +368,7 @@ 
pub fn validator_move_misc_flags_1() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -342,7 +382,7 @@ pub fn validator_move_misc_flags_2() { .flag("--src-vc-token", Some("./1.json")) .flag("--dest-vc-url", Some("http://localhost:2")) .flag("--dest-vc-token", Some("./2.json")) - .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--validators", Some(EXAMPLE_PUBKEY_0)) .flag("--builder-proposals", Some("false")) .flag("--builder-boost-factor", Some("100")) .assert_success(|config| { @@ -360,7 +400,7 @@ pub fn validator_move_misc_flags_2() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -388,9 +428,43 @@ pub fn validator_move_count() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); }); } + +#[test] +pub fn validator_list_defaults() { + CommandLineTest::validators_list() + .flag("--vc-token", Some("./token.json")) + .assert_success(|config| { + let expected = ListConfig { + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_delete_defaults() { + CommandLineTest::validators_delete() + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--vc-token", Some("./token.json")) + .assert_success(|config| { + let expected = DeleteConfig { + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + validators_to_delete: vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap(), + 
PublicKeyBytes::from_str(EXAMPLE_PUBKEY_1).unwrap(), + ], + }; + assert_eq!(expected, config); + }); +} diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index ca701eb7e9..159c89badb 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -1,6 +1,6 @@ # Simple Local Testnet -These scripts allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 geth execution clients using Kurtosis. +These scripts allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 Geth execution clients using Kurtosis. This setup can be useful for testing and development. ## Installation @@ -9,7 +9,7 @@ This setup can be useful for testing and development. 1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version` which should display the version. -1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. +1. Install [`yq`](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. ## Starting the testnet @@ -22,7 +22,7 @@ cd ./scripts/local_testnet It will build a Lighthouse docker image from the root of the directory and will take an approximately 12 minutes to complete. Once built, the testing will be started automatically. You will see a list of services running and "Started!" at the end. You can also select your own Lighthouse docker image to use by specifying it in `network_params.yml` under the `cl_image` key. -Full configuration reference for kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). +Full configuration reference for Kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). 
To view all running services: @@ -36,7 +36,7 @@ To view the logs: kurtosis service logs local-testnet $SERVICE_NAME ``` -where `$SERVICE_NAME` is obtained by inspecting the running services above. For example, to view the logs of the first beacon node, validator client and geth: +where `$SERVICE_NAME` is obtained by inspecting the running services above. For example, to view the logs of the first beacon node, validator client and Geth: ```bash kurtosis service logs local-testnet -f cl-1-lighthouse-geth diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 441e2a6357..5be5c13dee 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -71,7 +71,7 @@ if [[ "$BEHAVIOR" == "failure" ]]; then # This process should not last longer than 2 epochs vc_1_range_start=0 vc_1_range_end=$(($KEYS_PER_NODE - 1)) - vc_1_keys_artifact_id="1-lighthouse-geth-$vc_1_range_start-$vc_1_range_end-0" + vc_1_keys_artifact_id="1-lighthouse-geth-$vc_1_range_start-$vc_1_range_end" service_name=vc-1-doppelganger kurtosis service add \ @@ -107,7 +107,7 @@ if [[ "$BEHAVIOR" == "success" ]]; then vc_4_range_start=$(($KEYS_PER_NODE * 3)) vc_4_range_end=$(($KEYS_PER_NODE * 4 - 1)) - vc_4_keys_artifact_id="4-lighthouse-geth-$vc_4_range_start-$vc_4_range_end-0" + vc_4_keys_artifact_id="4-lighthouse-geth-$vc_4_range_start-$vc_4_range_end" service_name=vc-4 kurtosis service add \ diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 034a1b71a3..fcecc2fc23 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -17,31 +17,31 @@ byteorder = { workspace = true } derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } -lighthouse_metrics = { workspace = true } filesystem = { workspace = true } +flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } 
+lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lru = { workspace = true } -parking_lot = { workspace = true } -rand = { workspace = true } -safe_arith = { workspace = true } -serde = { workspace = true } -slog = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } -types = { workspace = true } -strum = { workspace = true } -ssz_types = { workspace = true } # MDBX is pinned at the last version with Windows and macOS support. mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = "e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a", optional = true } -lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } -lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +metrics = { workspace = true } +parking_lot = { workspace = true } +rand = { workspace = true } redb = { version = "2.1.4", optional = true } +safe_arith = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +ssz_types = { workspace = true } +strum = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +types = { workspace = true } [dev-dependencies] +logging = { workspace = true } maplit = { workspace = true } rayon = { workspace = true } tempfile = { workspace = true } -logging = { workspace = true } diff --git a/slasher/src/database/interface.rs b/slasher/src/database/interface.rs index 46cf9a4a0c..af72006caa 100644 --- a/slasher/src/database/interface.rs +++ b/slasher/src/database/interface.rs @@ -192,7 +192,7 @@ impl<'env> RwTransaction<'env> { } } -impl<'env> Cursor<'env> { +impl Cursor<'_> { /// Return the first key in the current database 
while advancing the cursor's position. pub fn first_key(&mut self) -> Result, Error> { match self { diff --git a/slasher/src/metrics.rs b/slasher/src/metrics.rs index 2e49bd4aeb..cfeec2d74e 100644 --- a/slasher/src/metrics.rs +++ b/slasher/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static SLASHER_DATABASE_SIZE: LazyLock> = LazyLock::new(|| { diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 6012283e11..d93f3a5578 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -12,28 +12,28 @@ portable = ["beacon_chain/portable"] [dependencies] alloy-primitives = { workspace = true } +beacon_chain = { workspace = true } bls = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } derivative = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +fs2 = { workspace = true } hex = { workspace = true } kzg = { workspace = true } +logging = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } -eth2_network_config = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } +snap = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } types = { workspace = true } -snap = { workspace = true } -fs2 = { workspace = true } -beacon_chain = { workspace = true } -fork_choice = { workspace = true } -execution_layer = { workspace = true } -logging = { workspace = true } diff --git 
a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 390711079f..d5f4997bb7 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.6 +TESTS_TAG := v1.5.0-alpha.8 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 117c89a22f..dacca204c1 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -48,11 +48,6 @@ excluded_paths = [ "tests/.*/eip6110", "tests/.*/whisk", "tests/.*/eip7594", - # TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - "tests/.*/electra/ssz_static/LightClientUpdate", - "tests/.*/electra/ssz_static/LightClientFinalityUpdate", - "tests/.*/electra/ssz_static/LightClientBootstrap", - "tests/.*/electra/merkle_proof", ] diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index dfd782a22b..c1adf10770 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -86,7 +86,7 @@ type_name!(RewardsAndPenalties, "rewards_and_penalties"); type_name!(RegistryUpdates, "registry_updates"); type_name!(Slashings, "slashings"); type_name!(Eth1DataReset, "eth1_data_reset"); -type_name!(PendingBalanceDeposits, "pending_balance_deposits"); +type_name!(PendingBalanceDeposits, "pending_deposits"); type_name!(PendingConsolidations, "pending_consolidations"); type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); type_name!(SlashingsReset, "slashings_reset"); @@ -193,7 +193,7 @@ impl EpochTransition for PendingBalanceDeposits { state, spec, SinglePassConfig { - pending_balance_deposits: true, + pending_deposits: true, ..SinglePassConfig::disable_all() }, ) @@ -363,7 +363,7 @@ impl> Case for EpochProcessing { } if !fork_name.electra_enabled() - && 
(T::name() == "pending_consolidations" || T::name() == "pending_balance_deposits") + && (T::name() == "pending_consolidations" || T::name() == "pending_deposits") { return false; } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 8d933a6fcd..427bcf5e9c 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -505,8 +505,8 @@ impl Tester { } Err(_) => GossipVerifiedBlob::__assumed_valid(blob_sidecar), }; - let result = self - .block_on_dangerous(self.harness.chain.process_gossip_blob(blob, || Ok(())))?; + let result = + self.block_on_dangerous(self.harness.chain.process_gossip_blob(blob))?; if valid { assert!(result.is_ok()); } @@ -809,10 +809,13 @@ impl Tester { if expected_should_override_fcu.validator_is_connected { el.update_proposer_preparation( next_slot_epoch, - &[ProposerPreparationData { - validator_index: dbg!(proposer_index) as u64, - fee_recipient: Default::default(), - }], + [( + &ProposerPreparationData { + validator_index: dbg!(proposer_index) as u64, + fee_recipient: Default::default(), + }, + &None, + )], ) .await; } else { @@ -871,7 +874,7 @@ pub struct ManuallyVerifiedAttestation<'a, T: BeaconChainTypes> { indexed_attestation: IndexedAttestation, } -impl<'a, T: BeaconChainTypes> VerifiedAttestation for ManuallyVerifiedAttestation<'a, T> { +impl VerifiedAttestation for ManuallyVerifiedAttestation<'_, T> { fn attestation(&self) -> AttestationRef { self.attestation.to_ref() } diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index b68bbdc5d3..49c0719784 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -3,8 +3,8 @@ use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use tree_hash::Hash256; use types::{ - BeaconBlockBody, BeaconBlockBodyDeneb, 
BeaconBlockBodyElectra, BeaconState, FixedVector, - FullPayload, Unsigned, + light_client_update, BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, + BeaconBlockBodyElectra, BeaconState, FixedVector, FullPayload, Unsigned, }; #[derive(Debug, Clone, Deserialize)] @@ -22,13 +22,13 @@ pub struct MerkleProof { #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] -pub struct MerkleProofValidity { +pub struct BeaconStateMerkleProofValidity { pub metadata: Option, pub state: BeaconState, pub merkle_proof: MerkleProof, } -impl LoadCase for MerkleProofValidity { +impl LoadCase for BeaconStateMerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?; @@ -49,11 +49,30 @@ impl LoadCase for MerkleProofValidity { } } -impl Case for MerkleProofValidity { +impl Case for BeaconStateMerkleProofValidity { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut state = self.state.clone(); state.update_tree_hash_cache().unwrap(); - let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else { + + let proof = match self.merkle_proof.leaf_index { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + | light_client_update::CURRENT_SYNC_COMMITTEE_INDEX => { + state.compute_current_sync_committee_proof() + } + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { + state.compute_next_sync_committee_proof() + } + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + | light_client_update::FINALIZED_ROOT_INDEX => state.compute_finalized_root_proof(), + _ => { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof, invalid index".to_string(), + )); + } + }; + + let Ok(proof) = proof else { return Err(Error::FailedToParseTest( "Could not retrieve merkle proof".to_string(), )); @@ -198,3 +217,81 @@ impl 
Case for KzgInclusionMerkleProofValidity { } } } + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct BeaconBlockBodyMerkleProofValidity { + pub metadata: Option, + pub block_body: BeaconBlockBody>, + pub merkle_proof: MerkleProof, +} + +impl LoadCase for BeaconBlockBodyMerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let block_body: BeaconBlockBody> = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix => { + return Err(Error::InternalError(format!( + "Beacon block body merkle proof validity test skipped for {:?}", + fork_name + ))) + } + ForkName::Capella => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? + .into() + } + ForkName::Deneb => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() + } + ForkName::Electra => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? + .into() + } + }; + let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; + // Metadata does not exist in these tests but it is left like this just in case. + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) 
+ } else { + None + }; + Ok(Self { + metadata, + block_body, + merkle_proof, + }) + } +} + +impl Case for BeaconBlockBodyMerkleProofValidity { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let binding = self.block_body.clone(); + let block_body = binding.to_ref(); + let Ok(proof) = block_body.block_body_merkle_proof(self.merkle_proof.leaf_index) else { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )); + }; + let proof_len = proof.len(); + let branch_len = self.merkle_proof.branch.len(); + if proof_len != branch_len { + return Err(Error::NotEqual(format!( + "Branches not equal in length computed: {}, expected {}", + proof_len, branch_len + ))); + } + + for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) { + let expected_leaf = self.merkle_proof.branch[i]; + if *proof_leaf != expected_leaf { + return Err(Error::NotEqual(format!( + "Leaves not equal in merke proof computed: {}, expected: {}", + hex::encode(proof_leaf), + hex::encode(expected_leaf) + ))); + } + } + + Ok(()) + } +} diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 5e928d2244..f4a09de32c 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -921,10 +921,10 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { #[derive(Derivative)] #[derivative(Default(bound = ""))] -pub struct MerkleProofValidityHandler(PhantomData); +pub struct BeaconStateMerkleProofValidityHandler(PhantomData); -impl Handler for MerkleProofValidityHandler { - type Case = cases::MerkleProofValidity; +impl Handler for BeaconStateMerkleProofValidityHandler { + type Case = cases::BeaconStateMerkleProofValidity; fn config_name() -> &'static str { E::name() @@ -935,15 +935,11 @@ impl Handler for MerkleProofValidityHandler { } fn handler_name(&self) -> String { - "single_merkle_proof".into() + "single_merkle_proof/BeaconState".into() } - fn is_enabled_for_fork(&self, _fork_name: 
ForkName) -> bool { - // Test is skipped due to some changes in the Capella light client - // spec. - // - // https://github.com/sigp/lighthouse/issues/4022 - false + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.altair_enabled() } } @@ -967,8 +963,32 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - // TODO(electra) re-enable for electra once merkle proof issues for electra are resolved - fork_name.deneb_enabled() && !fork_name.electra_enabled() + // Enabled in Deneb + fork_name.deneb_enabled() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct BeaconBlockBodyMerkleProofValidityHandler(PhantomData); + +impl Handler for BeaconBlockBodyMerkleProofValidityHandler { + type Case = cases::BeaconBlockBodyMerkleProofValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "single_merkle_proof/BeaconBlockBody".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.capella_enabled() } } @@ -993,8 +1013,7 @@ impl Handler for LightClientUpdateHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Enabled in Altair - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - fork_name.altair_enabled() && fork_name != ForkName::Electra + fork_name.altair_enabled() } } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index a9322e5dd5..c50032a63d 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -134,7 +134,7 @@ type_name_generic!(LightClientUpdateElectra, "LightClientUpdate"); type_name_generic!(PendingAttestation); type_name!(PendingConsolidation); type_name!(PendingPartialWithdrawal); -type_name!(PendingBalanceDeposit); +type_name!(PendingDeposit); type_name!(ProposerSlashing); type_name_generic!(SignedAggregateAndProof); 
type_name_generic!(SignedAggregateAndProofBase, "SignedAggregateAndProof"); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index c2524c14e2..292625a371 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -243,8 +243,7 @@ mod ssz_static { use types::historical_summary::HistoricalSummary; use types::{ AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest, - LightClientBootstrapAltair, PendingBalanceDeposit, PendingPartialWithdrawal, - WithdrawalRequest, *, + LightClientBootstrapAltair, PendingDeposit, PendingPartialWithdrawal, WithdrawalRequest, *, }; ssz_static_test!(attestation_data, AttestationData); @@ -396,11 +395,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only() - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only() - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only() + .run(); } // LightClientHeader has no internal indicator of which fork it is for, so we test it separately. @@ -476,13 +474,12 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only( ) .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } // LightClientUpdate has no internal indicator of which fork it is for, so we test it separately. 
@@ -506,13 +503,12 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } #[test] @@ -664,8 +660,8 @@ mod ssz_static { #[test] fn pending_balance_deposit() { - SszStaticHandler::::electra_and_later().run(); - SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); } #[test] @@ -922,8 +918,13 @@ fn kzg_recover_cells_and_proofs() { } #[test] -fn merkle_proof_validity() { - MerkleProofValidityHandler::::default().run(); +fn beacon_state_merkle_proof_validity() { + BeaconStateMerkleProofValidityHandler::::default().run(); +} + +#[test] +fn beacon_block_body_merkle_proof_validity() { + BeaconBlockBodyMerkleProofValidityHandler::::default().run(); } #[test] diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index c76ef91183..9b0ac5ec9b 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -5,12 +5,12 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -tokio = { workspace = true } +deposit_contract = { workspace = true } +ethers-contract = "1.0.2" ethers-core = { workspace = true } ethers-providers = { workspace = true } -ethers-contract = "1.0.2" -types = { workspace = true } -serde_json = { workspace = true } -deposit_contract = { workspace = true } -unused_port = { workspace = true } hex = { workspace = true } +serde_json = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +unused_port = { workspace = true } diff --git a/testing/eth1_test_rig/src/lib.rs 
b/testing/eth1_test_rig/src/lib.rs index 015a632ff4..3cba908261 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -1,6 +1,6 @@ //! Provides utilities for deploying and manipulating the eth2 deposit contract on the eth1 chain. //! -//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil) to simulate +//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil) to simulate //! the deposit contract for testing beacon node eth1 integration. //! //! Not tested to work with actual clients (e.g., geth). It should work fine, however there may be diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 159561d5dd..28ff944799 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -5,22 +5,22 @@ edition = { workspace = true } [dependencies] async-channel = { workspace = true } -tempfile = { workspace = true } +deposit_contract = { workspace = true } +ethers-core = { workspace = true } +ethers-providers = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +logging = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } serde_json = { workspace = true } task_executor = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } -futures = { workspace = true } -execution_layer = { workspace = true } -sensitive_url = { workspace = true } types = { workspace = true } unused_port = { workspace = true } -ethers-providers = { workspace = true } -ethers-core = { workspace = true } -deposit_contract = { workspace = true } -reqwest = { workspace = true } -hex = { workspace = true } -fork_choice = { workspace = true } -logging = { workspace = true } [features] portable = ["types/portable"] diff 
--git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 0289fd4206..f664509304 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -3,9 +3,10 @@ use crate::execution_engine::{ }; use crate::transactions::transactions; use ethers_providers::Middleware; +use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, - PayloadStatus, + PayloadParameters, PayloadStatus, }; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; @@ -251,6 +252,7 @@ impl TestRig { */ let parent_hash = terminal_pow_block_hash; + let parent_gas_limit = DEFAULT_GAS_LIMIT; let timestamp = timestamp_now(); let prev_randao = Hash256::zero(); let head_root = Hash256::zero(); @@ -324,15 +326,22 @@ impl TestRig { Some(vec![]), None, ); + + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: TEST_FORK, + }; + let block_proposal_content_type = self .ee_a .execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - TEST_FORK, &self.spec, None, BlockProductionVersion::FullV2, @@ -476,15 +485,22 @@ impl TestRig { Some(vec![]), None, ); + + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: TEST_FORK, + }; + let block_proposal_content_type = self .ee_a .execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - TEST_FORK, &self.spec, None, 
BlockProductionVersion::FullV2, diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 4696d8d2f1..0d9db528da 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -5,13 +5,14 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -environment = { workspace = true } beacon_node = { workspace = true } -types = { workspace = true } -tempfile = { workspace = true } +beacon_node_fallback = { workspace = true } +environment = { workspace = true } eth2 = { workspace = true } +execution_layer = { workspace = true } +sensitive_url = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } validator_client = { workspace = true } validator_dir = { workspace = true, features = ["insecure_keys"] } -sensitive_url = { workspace = true } -execution_layer = { workspace = true } -tokio = { workspace = true } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 3320898642..ac01c84b9d 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -16,12 +16,13 @@ use validator_client::ProductionValidatorClient; use validator_dir::insecure_keys::build_deterministic_validator_dirs; pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient}; +pub use beacon_node_fallback::ApiTopic; pub use environment; pub use eth2; pub use execution_layer::test_utils::{ Config as MockServerConfig, MockExecutionConfig, MockServer, }; -pub use validator_client::{ApiTopic, Config as ValidatorConfig}; +pub use validator_client::Config as ValidatorConfig; /// The global timeout for HTTP requests to the beacon node. 
const HTTP_TIMEOUT: Duration = Duration::from_secs(8); @@ -103,8 +104,6 @@ pub fn testing_client_config() -> ClientConfig { client_config.http_api.enabled = true; client_config.http_api.listen_port = 0; - client_config.dummy_eth1_backend = true; - let now = SystemTime::now() .duration_since(UNIX_EPOCH) .expect("should get system time") diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 7772523284..77645dba45 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -3,20 +3,19 @@ name = "simulator" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -node_test_rig = { path = "../node_test_rig" } -execution_layer = { workspace = true } -types = { workspace = true } -parking_lot = { workspace = true } -futures = { workspace = true } -tokio = { workspace = true } -env_logger = { workspace = true } clap = { workspace = true } +env_logger = { workspace = true } +eth2_network_config = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } +kzg = { workspace = true } +node_test_rig = { path = "../node_test_rig" } +parking_lot = { workspace = true } rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } -eth2_network_config = { workspace = true } serde_json = { workspace = true } -kzg = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index e1cef95cd3..8f659a893f 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -175,7 +175,8 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { executor.spawn( async move { let mut validator_config = testing_validator_config(); - validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into()); + 
validator_config.validator_store.fee_recipient = + Some(SUGGESTED_FEE_RECIPIENT.into()); println!("Adding validator client {}", i); // Enable broadcast on every 4th node. @@ -206,7 +207,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { node.server.all_payloads_valid(); }); - let duration_to_genesis = network.duration_to_genesis().await; + let duration_to_genesis = network.duration_to_genesis().await?; println!("Duration to genesis: {}", duration_to_genesis.as_secs()); sleep(duration_to_genesis).await; diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 3859257fb7..b3b9a46001 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -178,7 +178,8 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { executor.spawn( async move { let mut validator_config = testing_validator_config(); - validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into()); + validator_config.validator_store.fee_recipient = + Some(SUGGESTED_FEE_RECIPIENT.into()); println!("Adding validator client {}", i); network_1 .add_validator_client_with_fallbacks( @@ -194,7 +195,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { ); } - let duration_to_genesis = network.duration_to_genesis().await; + let duration_to_genesis = network.duration_to_genesis().await?; println!("Duration to genesis: {}", duration_to_genesis.as_secs()); sleep(duration_to_genesis).await; diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 7b9327a7aa..59efc09baa 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -459,7 +459,7 @@ impl LocalNetwork { .map(|body| body.unwrap().data.finalized.epoch) } - pub async fn duration_to_genesis(&self) -> Duration { + pub async fn duration_to_genesis(&self) -> Result { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let 
bootnode = nodes.first().expect("Should contain bootnode"); let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); @@ -471,6 +471,9 @@ impl LocalNetwork { .data .genesis_time, ); - genesis_time - now + genesis_time.checked_sub(now).ok_or( + "The genesis time has already passed since all nodes started. The node startup time \ + may have regressed, and the current `GENESIS_DELAY` is no longer sufficient.", + ) } } diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 142a657f07..7c29715346 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -3,15 +3,14 @@ name = "state_transition_vectors" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -state_processing = { workspace = true } -types = { workspace = true } -ethereum_ssz = { workspace = true } beacon_chain = { workspace = true } +ethereum_ssz = { workspace = true } +state_processing = { workspace = true } tokio = { workspace = true } +types = { workspace = true } [features] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/testing/test-test_logger/Cargo.toml b/testing/test-test_logger/Cargo.toml index 63bb87c06e..d2d705f714 100644 --- a/testing/test-test_logger/Cargo.toml +++ b/testing/test-test_logger/Cargo.toml @@ -2,7 +2,6 @@ name = "test-test_logger" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index db5c53e0ac..376aa13406 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -2,29 +2,30 @@ name = "web3signer_tests" version = "0.1.0" edition = { 
workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] [dev-dependencies] +account_utils = { workspace = true } async-channel = { workspace = true } +environment = { workspace = true } eth2_keystore = { workspace = true } -types = { workspace = true } +eth2_network_config = { workspace = true } +futures = { workspace = true } +initialized_validators = { workspace = true } +logging = { workspace = true } +parking_lot = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +slashing_protection = { workspace = true } +slot_clock = { workspace = true } +task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } -reqwest = { workspace = true } +types = { workspace = true } url = { workspace = true } -validator_client = { workspace = true } -slot_clock = { workspace = true } -futures = { workspace = true } -task_executor = { workspace = true } -environment = { workspace = true } -account_utils = { workspace = true } -serde = { workspace = true } -serde_yaml = { workspace = true } -eth2_network_config = { workspace = true } -serde_json = { workspace = true } +validator_store = { workspace = true } zip = { workspace = true } -parking_lot = { workspace = true } -logging = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 3a039d3c80..e0dee9ceb4 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -22,10 +22,14 @@ mod tests { }; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; + use initialized_validators::{ + load_pem_certificate, load_pkcs12_identity, InitializedValidators, + }; use logging::test_logger; use parking_lot::Mutex; use reqwest::Client; use serde::Serialize; + use slashing_protection::{SlashingDatabase, 
SLASHING_PROTECTION_FILENAME}; use slot_clock::{SlotClock, TestingSlotClock}; use std::env; use std::fmt::Debug; @@ -41,13 +45,7 @@ mod tests { use tokio::time::sleep; use types::{attestation::AttestationBase, *}; use url::Url; - use validator_client::{ - initialized_validators::{ - load_pem_certificate, load_pkcs12_identity, InitializedValidators, - }, - validator_store::{Error as ValidatorStoreError, ValidatorStore}, - SlashingDatabase, SLASHING_PROTECTION_FILENAME, - }; + use validator_store::{Error as ValidatorStoreError, ValidatorStore}; /// If the we are unable to reach the Web3Signer HTTP API within this time out then we will /// assume it failed to start. @@ -132,7 +130,11 @@ mod tests { } fn client_identity_path() -> PathBuf { - tls_dir().join("lighthouse").join("key.p12") + if cfg!(target_os = "macos") { + tls_dir().join("lighthouse").join("key_legacy.p12") + } else { + tls_dir().join("lighthouse").join("key.p12") + } } fn client_identity_password() -> String { @@ -171,6 +173,8 @@ mod tests { } impl Web3SignerRig { + // We need to hold that lock as we want to get the binary only once + #[allow(clippy::await_holding_lock)] pub async fn new(network: &str, listen_address: &str, listen_port: u16) -> Self { GET_WEB3SIGNER_BIN .get_or_init(|| async { @@ -208,7 +212,7 @@ mod tests { keystore_password_file: keystore_password_filename.to_string(), }; let key_config_file = - File::create(&keystore_dir.path().join("key-config.yaml")).unwrap(); + File::create(keystore_dir.path().join("key-config.yaml")).unwrap(); serde_yaml::to_writer(key_config_file, &key_config).unwrap(); let tls_keystore_file = tls_dir().join("web3signer").join("key.p12"); @@ -322,7 +326,7 @@ mod tests { let log = test_logger(); let validator_dir = TempDir::new().unwrap(); - let config = validator_client::Config::default(); + let config = initialized_validators::Config::default(); let validator_definitions = ValidatorDefinitions::from(validator_definitions); let initialized_validators = 
InitializedValidators::from_definitions( validator_definitions, @@ -354,7 +358,7 @@ mod tests { let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); - let config = validator_client::Config { + let config = validator_store::Config { enable_web3signer_slashing_protection: slashing_protection_config.local, ..Default::default() }; diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index f918e87cf8..3b14dbddba 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,7 +1,20 @@ #!/bin/bash -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && -openssl pkcs12 -export -aes256 -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && + +# The lighthouse/key_legacy.p12 file is generated specifically for macOS because the default `openssl pkcs12` encoding +# algorithm in OpenSSL v3 is not compatible with the PKCS algorithm used by the Apple Security Framework. The client +# side (using the reqwest crate) relies on the Apple Security Framework to parse PKCS files. +# We don't need to generate web3signer/key_legacy.p12 because the compatibility issue doesn't occur on the web3signer +# side. It seems that web3signer (Java) uses its own implementation to parse PKCS files. +# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2469252651 + +# We specify `-days 825` when generating the certificate files because Apple requires TLS server certificates to have a +# validity period of 825 days or fewer. 
+# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2474979183 + +openssl req -x509 -sha256 -nodes -days 825 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && +openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && cp web3signer/cert.pem lighthouse/web3signer.pem && -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && -openssl pkcs12 -export -aes256 -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl req -x509 -sha256 -nodes -days 825 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && +openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl pkcs12 -export -legacy -out lighthouse/key_legacy.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem index 24b0a2e5c0..4aaf66b747 100644 --- a/testing/web3signer_tests/tls/lighthouse/cert.pem +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUXZijYo8W4/9dAq58ocFEbZDxohwwDQYJKoZIhvcNAQEL +MIIFuDCCA6CgAwIBAgIUa3O7icWD4W7c5yRMjG/EX422ODUwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMDAyNTYzNloYDzIxMjMwODI3MDI1NjM2 
-WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 -MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC1 -R1M9NnRwUsqFvJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52 -aHSA2fs2KyeA61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDE -jf0ogUVM9TCEt6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAw -Oz1d8/fxYJvIpT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5Fe -V0fPth+e9XMAH7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI -0vps1zF9Bo8QewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWM -VcR//EtbOZGqzGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr -67Vyi9SWSM6rdRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91f -kpT6kjc6d2h4bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa -3zLeqd89dS7HNLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcf -TPFe8xuDYsi155veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABo1QwUjALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQU6r7QHkcEsWhEZHpcMpGxwKXQL9swDQYJKoZIhvcNAQELBQADggIBACyO -8xzqotye1J6xhDQCQnQF3dXaPTqfT31Ypg8UeU25V9N+bZO04CJKlOblukuvkedE -x1RDeqG3A81D4JOgTGFmFVoEF4iTk3NBrsHuMzph6ImHTd3TD+5iG5a3GL0i9PAI -dHTT6z6t2wlayjmHotqQ+N4A4msx8IPBRULcCmId319gpSDHsvt2wYbLdh+d9E2h -vI0VleJpJ7eoy05842VTkFJebriSpi75yFphKUnyAKlONiMN3o6eg90wpWdI+1rQ -js5lfm+pxYw8H6eSf+rl30m+amrxUlooqrSCHNVSO2c4+W5m/r3JfOiRqVUTxaO8 -0f/xYXo6SdRxdvJV18LEzOHURvkbqBjLoEfHbCC2EApevWAeCdjhvCBPl1IJZtFP -sYDpYtHhw69JmZ7Nj75cQyRtJMQ5S4GsJ/haYXNZPgRL1XBo1ntuc8K1cLZ2MucQ -1170+2pi3IvwmST+/+7+2fyms1AwF7rj2dVxNfPIvOxi6E9lHmPVxvpbuOYOEhex -XqTum/MjI17Qf6eoipk81ppCFtO9s3qNe9SBSjzYEYnsytaMdZSSjsOhE/IyYPHI -SICMjWE13du03Z5xWwK9i3UiFq+hIPhBHFPGkNFMmkQtcyS9lj9R0tKUmWdFPNa8 -nuhxn5kLUMriv3zsdhMPUC4NwM5XsopdWcuSxfnt +VQQDDApsaWdodGhvdXNlMB4XDTI0MTExNjIyMTI0NloXDTI3MDIxOTIyMTI0Nlow +azELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0eTES +MBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYDVQQD 
+DApsaWdodGhvdXNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsAg4 +CkW51XFC0ZlcLXOzAHHD3e1y2tCkvQLCC5YG4QGVnXtva4puSGprs5H2r46TM+92 +7EXqNls+UWARLJE8+cw6Jz2Ibpjyv9TwdHUYqlRjSsAJ1E9kFKWnQuzWSPUilY22 +KfkxkEfauAvL5qXBAX9C31E9t/QWWgFtiGetwk+MuVoqLFCifw2iKfKrKod/t0Ua +ykxm3PUi1LIjZq3yZIg6beiVIGNQ/FWcNK3NeR6LP7ZDvSWl1vJAQ/6EBTcNTYKb +B3rEiHmme20Vpl6QQMvzlZ+e+ZaU0JsycvEfKrBACvPXX1Bi1GVFFstb5XQ4a/f4 +p7LUQ9rJwOkm5mRLgrSkNzq4Nk1lPOIam5QFpdW4GBfeIUL0Q4K9io/fYsxF1DXh +fxCW1N6E6+RKhVG2cEdtnAmQxg9d8vIEMvFtuVMFMYjQ+qkJ5V0Ye11V/9lMo4Vf +H2ialSTLTKxoEjmYfCHXKu7JCba04uGEv9gzaX7Zk+uK9gN1FIMvDT3UIHZTDwtr +cm2kjn3wsuRiK3P974pAVAome+60jmH9M0IsBxLXilCI6aIcYwvHkfoSNwXQr1AI +6rBBA4o8df0OFvMp2/r1Ll9nLDTT7AxtjHu7C2HU46Fy9U01+oRiqW+UCY9+daMD +tQJMTkjfPwOU6b9KUOPKpraDnPubwNU6CXs6ySMCAwEAAaNUMFIwCwYDVR0PBAQD +AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwHQYDVR0O +BBYEFKbpk6hZNzlzv/AdKtsl6x+dgBo+MA0GCSqGSIb3DQEBCwUAA4ICAQCmICqz +X5WOhwUm6LJJwMvKgFoVkav6ZcG/bEPiLe4waM2BubTpa1KPke8kMSmd/eLRxOiU +o1Z4Wi+bDw/ZGZHhnj/bJBZei9O+uRV4RbHCBh/LutRjY5zrublXMTtmjxCIjjHK +nQnoFFqKelyUGdaOw1ttooRT2FSDriZ6LKJ9vrTx0eCPBPA0EyaxuaxX3e/qYfE6 +sdrseEZSsouAmNCQ6jHnrQlzjeGAE6tlSTC3NVWbDlDbnX6cdRF07kV5PxnfcoyO +HGM3hdrIk5mhLpXrNKZp1nI4Ecd6UKiMCLgVxfexRKVJn00IR1URotRXZ2H9hQnh +xT5CnEBM+9dXoiwIvU+QYpnxo7mc47I6VkvoBI05rnS10bliwAk20yZuqc8iYC7R +r+ISRnhAcSb0otnKvxQQqzRH4Fi13g4mIoxbPJq+xTrNomKe/ywUe5q1Dt8QMhEg +7Sv8yg4ErKEvWIk5N0JOe1PaysobWXkv5n+xH9eJneyuBHGdi8qXe+2JLkK7ZfKB +uuLZyQcbUxb0/FSOhvtYu+2hPUb7nCOFvheAafHJu1P0pOkP8NNpM9X+tNw8Orum +VVFO8rvOh4+pH8sXRZ4tUQ33mbQS96ZSuiMJYCQf6EDkqmtRkOHCAvKkEtRLm2yV +4IRAZKHZaeKYr1UXwaqzpwES+8ZZLjURkvqvnQ== -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key index d00b6c2122..2b510c6b6d 100644 --- a/testing/web3signer_tests/tls/lighthouse/key.key +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- 
-MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC1R1M9NnRwUsqF -vJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52aHSA2fs2KyeA -61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDEjf0ogUVM9TCE -t6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAwOz1d8/fxYJvI -pT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5FeV0fPth+e9XMA -H7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI0vps1zF9Bo8Q -ewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWMVcR//EtbOZGq -zGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr67Vyi9SWSM6r -dRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91fkpT6kjc6d2h4 -bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa3zLeqd89dS7H -NLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcfTPFe8xuDYsi1 -55veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABAoICAEP5a1KMPUwzF0Lfr1Jm1JUk -pLb26C2rkf3B56XIFZgddeJwHHMEkQ9Z6JYM5Bd0KJ6Y23rHgiXVN7plRvOiznMs -MAbgblroC8GbAUZ0eCJr5nxyOXQdS1jHufbA21x7FGbvsSqDkrdhR2C0uPLMyMvp -VHP7dey1mEyCkHrP+KFRU5kVxOG1WnBMqdY1Ws/uuMBdLk0xItttdOzfXhH4dHQD -wc5aAJrtusyNDFLC25Og49yIgpPMWe+gAYCm5jFz9PgRtVlDOwcxlX5J5+GSm7+U -XM1bPSmU1TSEH233JbQcqo4HkynB71ftbVUtMhEFhLBYoFO4u5Ncpr+wys0xJY4f -3aJRV5+gtlmAmsKN66GoMA10KNlLp2z7XMlx1EXegOHthcKfgf5D6LKRz8qZhknm -FFgAOg9Bak1mt1DighhPUJ0vLYU6K+u0ZXwysYygOkBJ/yj63ApuPCSTQb7U0JlL -JMgesy1om3rVdN0Oc7hNaxq7VwswkzUTUKS2ZvGozF3MmdPHNm5weJTb3NsWv8Qo -HiK1I88tY9oZ5r91SC82hMErmG4ElXFLxic1B29h3fsIe/l+WjmZRXixD9ugV0gj -CvNa8QD9K3hljlNrR6eSXeO2QOyxAEUr2N1MBlxrnAWZCzXKiTvTx1aKDYhJT0DY -zae/etTLHVjzgdH6GS33AoIBAQDaaWYHa9wkJIJPX4siVCatwWKGTjVfDb5Q9upf -twkxCf58pmbzUOXW3dbaz6S0npR0V6Wqh3S8HW7xaHgDZDMLJ1WxLJrgqDKU3Pqc -k7xnA/krWqoRVSOOGkPnSrnZo6AVc6FR+iwJjfuUu0rFDwiyuqvuXpwNsVwvAOoL -xIbaEbGUHiFsZamm2YkoxrEjXGFkZxQX9+n9f+IAiMxMQc0wezRREc8e61/mTovJ -QJ7ZDd7zLUR7Yeqciy59NOsD57cGtnp1K28I2eKLA4taghgd5bJjPkUaHg9j5Xf6 -nsxU2QCp9kpwXvtMxN7pERKWFsnmu8tfJOiUWCpp8SLbIl6nAoIBAQDUefKKjRLa -6quNW0rOGn2kx0K6sG7T45OhwvWXVjnPAjX3/2mAMALT1wc3t0iKDvpIEfMadW2S -O8x2FwyifdJXmkz943EZ/J5Tq1H0wr4NeClX4UlPIAx3CdFlCphqH6QfKtrpQ+Hf 
-+e8XzjVvdg8Y/RcbWgPgBtOh2oKT5QHDh13/994nH7GhVM7PjLUVvZVmNWaC77zr -bXcvJFF/81PAPWC2JoV6TL/CXvda2tG2clxbSfykfUBPBpeyEijMoxC4UMuCHhbp -NpLfKJQp9XNqbBG2K4jgLQ8Ipk6Vtia/hktLgORf/pbQ4PxEv7OP5e1AOreDg/CW -RnQtBb+/8czbAoIBABfDA8Cm8WpVNoAgKujvMs4QjgGCnLfcrOnuEw2awjs9lRxG -lki+cmLv+6IOmSK1Zf1KU9G7ru2QXjORZA0qZ4s9GkuOSMNMSUR8zh8ey46Bligr -UvlTw+x/2wdcz99nt9DdpZ1flE7tzYMe5UGPIykeufnS/TNYKmlKtivVk75B0ooE -xSof3Vczr4JqK3dnY4ki1cLNy/0yXookV+Wr+wDdRpHTWC9K+EH8JaUdjKqcobbf -I+Ywfu/NDJ++lBr2qKjoTWZV9VyHJ+hr2Etef/Uwujml2qq+vnnlyynPAPfyK+pR -y0NycfCmMoI0w0rk685YfAW75DnPZb3k6B/jG10CggEBAMxf2DoI5EAKRaUcUOHa -fUxIFhl4p8HMPy7zVkORPt2tZLf8xz/z7mRRirG+7FlPetJj4ZBrr09fkZVtKkwJ -9o8o7jGv2hSC9s/IFHb38tMF586N9nPTgenmWbF09ZHuiXEpSZPiJZvIzn/5a1Ch -IHiKyPUYKm4MYvhmM/+J4Z5v0KzrgJXlWHi0GJFu6KfWyaOcbdQ4QWG6009XAcWv -Cbn5z9KlTvKKbFDMA+UyYVG6wrdUfVzC1V6uGq+/49qiZuzDWlz4EFWWlsNsRsft -Pmz5Mjglu+zVqoZJYYGDydWjmT0w53qmae7U2hJOyqr5ILINSIOKH5qMfiboRr6c -GM0CggEAJTQD/jWjHDIZFRO4SmurNLoyY7bSXJsYAhl77j9Cw/G4vcE+erZYAhp3 -LYu2nrnA8498T9F3H1oKWnK7u4YXO8ViyQd73ql7iKrMjE98CjfGcTPCXwOcPAts -ZpM8ykgFTsJpXEFvIR5cyZ6XFSw2m/Z7CRDpmwQ8es4LpNnYA7V5Yu/zDE4h2/2T -NmftCiZvkxwgj6VyKumOxXBnGK6lB+b6YMTltRrgD/35zmJoKRdqyLb1szPJtQuh -HjRTa/BVPgA66xYFWhifRUiYKpc0bARTYofHeoDgu6yPzcHMuM70NQQGF+WWJySg -vc3Za4ClKSLmb3ZA9giTswYMev+3BQ== +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCwCDgKRbnVcULR +mVwtc7MAccPd7XLa0KS9AsILlgbhAZWde29rim5IamuzkfavjpMz73bsReo2Wz5R +YBEskTz5zDonPYhumPK/1PB0dRiqVGNKwAnUT2QUpadC7NZI9SKVjbYp+TGQR9q4 +C8vmpcEBf0LfUT239BZaAW2IZ63CT4y5WiosUKJ/DaIp8qsqh3+3RRrKTGbc9SLU +siNmrfJkiDpt6JUgY1D8VZw0rc15Hos/tkO9JaXW8kBD/oQFNw1NgpsHesSIeaZ7 +bRWmXpBAy/OVn575lpTQmzJy8R8qsEAK89dfUGLUZUUWy1vldDhr9/instRD2snA +6SbmZEuCtKQ3Org2TWU84hqblAWl1bgYF94hQvRDgr2Kj99izEXUNeF/EJbU3oTr +5EqFUbZwR22cCZDGD13y8gQy8W25UwUxiND6qQnlXRh7XVX/2UyjhV8faJqVJMtM +rGgSOZh8Idcq7skJtrTi4YS/2DNpftmT64r2A3UUgy8NPdQgdlMPC2tybaSOffCy +5GIrc/3vikBUCiZ77rSOYf0zQiwHEteKUIjpohxjC8eR+hI3BdCvUAjqsEEDijx1 
+/Q4W8ynb+vUuX2csNNPsDG2Me7sLYdTjoXL1TTX6hGKpb5QJj351owO1AkxOSN8/ +A5Tpv0pQ48qmtoOc+5vA1ToJezrJIwIDAQABAoICAAav4teBDpSTjBZD3Slc28/u +6NUYnORZe+iYnwZ4DIrZPij29D40ym7pAm5jFrWHyDYqddOqVEHJKMGuniuZpaQk +cSqy2IJbDRDi5fK5zNYSBQBlJMc/IzryXNUOA8kbU6HN+fDEpqPBSjqNOCtRRwoa +uE+dDNspsPx6UWh9IWMTfCUOZ8u6XguCWRN+3g6F8M2yS/I9AZG81898qBueczbR +qTNdQoAyEnS2sj7ODqArQniJIMmh3he5D15SrNefeVt+1D5uGEkwiQ9NqL58ZfGp +zcPa7HWB/H7Wmac3W0rwpxfDa5fgIq3Id93Sm9fh/yka1Z28c8cGgknxxKiIs6Jg +F7CKZIBJ3XxjcgytB223El/R8faHLpMJSPadDZ7uuU3yD/Qvp/JhRrdgkpE5bbzC +rWL92eVL86cbI/Hamup7VZMMfQpvjJg7FXPUr6ACKBetNkvXH0rqAkxHR8ZgfTeM +EwrpSWS0aktxxeMjzPq4DUaKKVGiN2KMDhbHEd5h2ovWMzyr14isohW81Z8w5R68 +F+2jq3IlVTLe06vmTRXAhOpwecj8UpraZjM1qyFpBd/lAolTjjMxzKJ2DcHlWI8Q +7e9LMvt1fj3bbzJVubdrITjdeom5CnDrmDGcErX9xzom8m3auYLszUENp/sfIHru +0DP+LKb2W4BOmXKs3VABAoIBAQDm4HNpOA7X7Jw7oowS4MoZOeeTjzcldT2AP9O7 +jFf2I2t5Ig0mIIrIrEJCL1X+A3i3RblV7lhU3Dpag8dhZUrXhydgnXKEMH/zz3gx +daCY1NO1fxAx5Y4J8VlCMIA7FpZI6sgRPjLBOFdkD34HcKHsUu/r3KQ1A1xZGLOU +o1kxF2WyORGBwn83kWzhzK9RIwFIdx67m7ZLzwoD6nQul4A6qq1EE+QI5x4UYpBx +ZvQsWUtj0EujIKJFszJczivwGQ86Aj0MB7EaHg+bWtYET1kUmDmc/72sksQJVcsK +wYtkv/MsznAvuWfHVjYJo47+Qs1zpuDKEUC1cu768LtlKpljAoIBAQDDL/T2KilF +qK8IW2u7nyWY8ksN/xJOVC79ozx2cGlR/zbeht051NiaLP8YMwVKl618Bw5L+aHG +a1xA0AeuTvuo5TK/ObrWzMAY6A35gXPMd8msN6SJzIKHZSZrcg2GXTSFkn7iCRJp +vl58VX4FubfrNIXy3NGbgF2muz3Rwvk7bj5Ur3NxX574RLSuftw01rDt2fnfYGKD +NfLXzoR3rJ/E+wmS7sjBJbltvmySDZOyjDDJwAgMrn45Xbh9rVT5w62BbAJ78OTY +O3CBf9t40FmeSBlelqwSY6tUmf02+B8FhMTJzxlaCup2qIPn5z0RHIZ43bnqZ/X1 +nkNSs8ko0f1BAoIBABCw9WcL+Ha/0mO1Uq8itTmxp/5RAkmg+jtFYgdTFCDlWqW9 +QnoZLC9p1Lh4N51PnvCRB98ghh5MdaOJl2aBLjH6wWwItfi8kOONgkEBIgUqjcu3 +TfJtiCFL44oXe43KCj9nSeOFPaIecqL3Q8NB71LohBPnNa/neEuwr3r1fENCT8Xc +vllFOHFKADcq1xnkj/kvM3eYwEsmwrCZyKB9r3WOVUxwq7HBE7mhjpPEP67dHcgv +jOhUOacUV3XCKgcHqMQm2Ub/X1xmA/bVUFerbONCRhgFnS7WxXlvTGiQqYU1I11/ +5zhsDQaqQunbe0ECj1vnGqVBLg5wKrrVoJalx8UCggEAE8438wqQKYtWR2jPY7hg +XkanqwHo353XLtFzfykk5rcY4DebFxUr7WkHcXMr5EfDyMQGhVsNOU8Hi2QQg3Vs 
+P9UR8yludgFMtLpHQLwL/gFhq2HyBjGERSzUWy61hJ7Mh4k36sO05Jn2iHM8WGRh +7zHjLaOOeVLrLdHuEezQ0WD8Xid3dVeYj+SY2OPygEIQrfHiUvI6zMmanJ9N/b68 +b4ZxkEE+iarESAh8h81s4T8sbCxaJL9H+5Yw9D+0UauzXWCSV/U3o2FUpy9MG9Q4 +Y8E5Icn0J+GJLwp5ESzYKP0x4rBrCCH3bJbo240xOx1D39vP06M85/FpL2kizkuQ +gQKCAQBTmQd/wT+0hH2JoEA2yCtB3ylDSmarZr9yZ83j3hy7oJOL48FhzhMTGjNR +BqmwbV3/2Vky85FYXYwcOIHbwI8twKtI4OxOiXLnLkYZ4nNXLm65ckR1SfJhRyrM +8K/alI2l3AxY/RkZiUnnRGEAmjG8hwzka1Y6j9zT7KhFTTBlg0YR5TOD8bsd9/rX +yVR+XkgyxIshgcI6w7MnwdGt+aAGokGjZv+k09vTOnaFF4rcJgOCZ9t4ymnG3m+v +Ac4I2b8BA46WCxA6zeNn5IeKZL0Ibgv1NGbTW3vEzu2D9VNU3pqTm9Pq3QpMAp85 +UyUzHP+SV/CL1Otbg/HjN6JGIcgY -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 index 73468fa084..f2ef6d20e2 100644 Binary files a/testing/web3signer_tests/tls/lighthouse/key.p12 and b/testing/web3signer_tests/tls/lighthouse/key.p12 differ diff --git a/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 b/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 new file mode 100644 index 0000000000..c3394fae9a Binary files /dev/null and b/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 differ diff --git a/testing/web3signer_tests/tls/lighthouse/web3signer.pem b/testing/web3signer_tests/tls/lighthouse/web3signer.pem index 6266cadf9b..cae7603320 100644 --- a/testing/web3signer_tests/tls/lighthouse/web3signer.pem +++ b/testing/web3signer_tests/tls/lighthouse/web3signer.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL +MIIFuDCCA6CgAwIBAgIUTFaMFhei/518WFdGuVrjhuPl+RAwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 -WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 -MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV 
-BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS -cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz -1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa -J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H -9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G -WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB -YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 -4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 -HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr -rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS -8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN -/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg -/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu -taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 -RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d -0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv -gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ -v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut -OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 -mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 -4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z -yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX -JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS +VQQDDAp3ZWIzc2lnbmVyMB4XDTI0MTExNjIyMTI0NloXDTI3MDIxOTIyMTI0Nlow +azELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0eTES +MBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYDVQQD +DAp3ZWIzc2lnbmVyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAm8ys +aOKMuGgcCHfDlOM0ACovWvH81bU9qtiw9QbwMHmcBU40KJ6grfCCpOzQSa2wtUkU 
+hwM1YDOqGYndunyAFEMGuk/KAFkMsAuKxkQDmACaAoQGga8S6uF8ggq7FKa/puq2 +8CauA1ukoKkzu2zkAnSQILqespqNlsAdmltW/G274QRtQgP9q09pfZQXo8hmhwn0 +yuPnOc8PwC4gFMDssQsJum/FsttmeZhMOqL7hPIJ4hMyX+BQMW/XC7QiT6YXuvbd +045J+KVO6JGnLjMAL7ZKkizOC6GRjkvIylbcppCnxLJZkOM0cbdJ/zKowkl8U7un +J6oIIXb8SIVWapl+E8oaeRcx+7PuSqp4vUly7GkzK3YjMm6CMtdpCDt80wmq4ljt +ZSqURZ5XFKV+kd0b7KjRZAhhBxHcb/L/ScpKj95a7Nwqc/c42ABwLNCfyhUoNrbv +JOXjDNVbq9WWUrkBO3/2p1wDOYqip/8Bh8RfJMsqrpb2p4qe92cIhh6uvnftYEW/ +eMnz3T78/Z4QwSzr018ak84lTQWoQv1c5ikkf5a1eD1XJXIUfV8TcteiynATKtMo +vxb9p8C0StSPRgP/ep7g6JxwWOQBnQo8b2VIBK0fGuiU+8Nd03zmQx4n4Szv5mRE +4MAGGF2KKmd2/7FsHEZJ/vV2jGioXNMnUb65y00CAwEAAaNUMFIwCwYDVR0PBAQD +AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwHQYDVR0O +BBYEFJeXpQKlrKwdrz52DAe37aZ+1fSkMA0GCSqGSIb3DQEBCwUAA4ICAQAKsLqM +yYr3bpx+6SzVLxb+KmNvoXELyzZkaJJX0nBmxmL+u7WD7J8NT61QPte89I6MDPVF +6qJTwBn5JE5yfBpy+D106s9nMAFLyTP07K1SbgaWbxL4LdxB0uMORj5Bt643gJCH +pkhGIOOv2XNAnuPonV6dJmaaKZz4MD2c6bN6DcfXlR92VlMoLv231M1EbgJvOJmg +9SEzwAkcLHhtuTbwyZv8+UgjsKhlZKJygaXTJVMacSXHUKoszvvWCWNK2ITReuPE +qWUd/AveZs+H+S0UDwJj9yXo47+ZSiXGxLUzdH5AutnapDTNTAQ1DySqtrJDUbw6 +GHsAPWtWefsJXDEuctgT0U+PgtDvGhA4Vv78Xrlg655jAYrKqKAbY9E21vDPv97J +1oNzbzsNlP5sRhlypf6VOeIuHF/T1m2MNtCyx5o/yH9EDLMicS629kMb6eBAK/qJ +MRFqRc9AbBHzIdC3f/YmG06WXc3fViPIAHP4zLC+wOjy4btC3pLQttQtrsOyqIbo +7IRPJ2PveMKGGaMCB+raGMO8kD4giJr3iUhrJde8Ggn32Ngngh9SsJFbGUWCPDmk +20USiQ5GU9CQlQhwDmA0K7vyjhOALP0bseTISqKkWeZZqeLrn1Y9Kl7rl46aEnzi +zs2KFCBovSPHkorjrhbm9N3KNpkBZaoa7SPA7A== -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/cert.pem b/testing/web3signer_tests/tls/web3signer/cert.pem index 6266cadf9b..cae7603320 100644 --- a/testing/web3signer_tests/tls/web3signer/cert.pem +++ b/testing/web3signer_tests/tls/web3signer/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL +MIIFuDCCA6CgAwIBAgIUTFaMFhei/518WFdGuVrjhuPl+RAwDQYJKoZIhvcNAQEL 
BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 -WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 -MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS -cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz -1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa -J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H -9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G -WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB -YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 -4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 -HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr -rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS -8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN -/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg -/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu -taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 -RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d -0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv -gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ -v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut -OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 -mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 -4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z -yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX -JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS 
+VQQDDAp3ZWIzc2lnbmVyMB4XDTI0MTExNjIyMTI0NloXDTI3MDIxOTIyMTI0Nlow +azELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0eTES +MBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYDVQQD +DAp3ZWIzc2lnbmVyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAm8ys +aOKMuGgcCHfDlOM0ACovWvH81bU9qtiw9QbwMHmcBU40KJ6grfCCpOzQSa2wtUkU +hwM1YDOqGYndunyAFEMGuk/KAFkMsAuKxkQDmACaAoQGga8S6uF8ggq7FKa/puq2 +8CauA1ukoKkzu2zkAnSQILqespqNlsAdmltW/G274QRtQgP9q09pfZQXo8hmhwn0 +yuPnOc8PwC4gFMDssQsJum/FsttmeZhMOqL7hPIJ4hMyX+BQMW/XC7QiT6YXuvbd +045J+KVO6JGnLjMAL7ZKkizOC6GRjkvIylbcppCnxLJZkOM0cbdJ/zKowkl8U7un +J6oIIXb8SIVWapl+E8oaeRcx+7PuSqp4vUly7GkzK3YjMm6CMtdpCDt80wmq4ljt +ZSqURZ5XFKV+kd0b7KjRZAhhBxHcb/L/ScpKj95a7Nwqc/c42ABwLNCfyhUoNrbv +JOXjDNVbq9WWUrkBO3/2p1wDOYqip/8Bh8RfJMsqrpb2p4qe92cIhh6uvnftYEW/ +eMnz3T78/Z4QwSzr018ak84lTQWoQv1c5ikkf5a1eD1XJXIUfV8TcteiynATKtMo +vxb9p8C0StSPRgP/ep7g6JxwWOQBnQo8b2VIBK0fGuiU+8Nd03zmQx4n4Szv5mRE +4MAGGF2KKmd2/7FsHEZJ/vV2jGioXNMnUb65y00CAwEAAaNUMFIwCwYDVR0PBAQD +AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwHQYDVR0O +BBYEFJeXpQKlrKwdrz52DAe37aZ+1fSkMA0GCSqGSIb3DQEBCwUAA4ICAQAKsLqM +yYr3bpx+6SzVLxb+KmNvoXELyzZkaJJX0nBmxmL+u7WD7J8NT61QPte89I6MDPVF +6qJTwBn5JE5yfBpy+D106s9nMAFLyTP07K1SbgaWbxL4LdxB0uMORj5Bt643gJCH +pkhGIOOv2XNAnuPonV6dJmaaKZz4MD2c6bN6DcfXlR92VlMoLv231M1EbgJvOJmg +9SEzwAkcLHhtuTbwyZv8+UgjsKhlZKJygaXTJVMacSXHUKoszvvWCWNK2ITReuPE +qWUd/AveZs+H+S0UDwJj9yXo47+ZSiXGxLUzdH5AutnapDTNTAQ1DySqtrJDUbw6 +GHsAPWtWefsJXDEuctgT0U+PgtDvGhA4Vv78Xrlg655jAYrKqKAbY9E21vDPv97J +1oNzbzsNlP5sRhlypf6VOeIuHF/T1m2MNtCyx5o/yH9EDLMicS629kMb6eBAK/qJ +MRFqRc9AbBHzIdC3f/YmG06WXc3fViPIAHP4zLC+wOjy4btC3pLQttQtrsOyqIbo +7IRPJ2PveMKGGaMCB+raGMO8kD4giJr3iUhrJde8Ggn32Ngngh9SsJFbGUWCPDmk +20USiQ5GU9CQlQhwDmA0K7vyjhOALP0bseTISqKkWeZZqeLrn1Y9Kl7rl46aEnzi +zs2KFCBovSPHkorjrhbm9N3KNpkBZaoa7SPA7A== -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/key.key b/testing/web3signer_tests/tls/web3signer/key.key index d969753406..97a36d1bb2 100644 
--- a/testing/web3signer_tests/tls/web3signer/key.key +++ b/testing/web3signer_tests/tls/web3signer/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDScvshqu3747j4 -KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz1FPflpj7pVWa -gWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaaJ3smHx2+VOha -MWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H9C0UBIzXP7Pn -GrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6GWLtJvk5ekfOV -lRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pBYKPThE5zW5Kh -IxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK84y5L4BXxxohG -0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8HtmSgkPEgfSV -RxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQrrrIUQAuXDcQX -11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS8kbmmuhxshsn -ZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN/IC4PpwtYUO3 -/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABAoICABRxePXJ+KOpznPE5Owo7BWe -BqTzC/K1xlCYm0v5IJzYEQlM4e4p4wZ+/kR6Hex/nM4IR+bbZpxjcOUObIsWpJTI -VAgS2y5RcTp+UJzfXpJogIpKiqBMNutAqPOrK8Hg797PtlsmAKoBmNn8xqU1+2Oa -FX/rKaJus6qKZ2bz16DnkFUL4foabDJte0IFbd2yAyGv1ZqGiqFKSJFK+wYeoMZU -LzWOEyUR/wK5ryVwJJCY8z9BKAoKNYnb4oHTFlDRDdztIlxv29sR9dtHsjA3EdQc -nOCTNi7eY6JJlucgBSWGrsS6vTvpImGggIIWt6sOh0Px6Fg0F7mFtsESex2GePow -50MwKFbbVo3TUYRYTggJj7ba4+yrl/dsAWJUX3F90xNj/6REF+2+Licb7kgCHQKw -TvdExiikOOFtuFRkl5fqyoM9Ph+sj7/db5Pd53D8vaMjR3Yw/JA5dKPZS5ZKHBs0 -qo7FxV8ZlOESMv2eF6y0kM4wLhUN8wnEWxpsFWtXDNjYIlQ6W5qrfwR1vlnIkrmb -bYQCJFtko6CKUEa8yb4OvLgyX6VSskeYEC5zdekivZWJN/OZZa/xIS2nupYqD4GT -Y3QcsEhfzDvVIwI7M+eBwS5qjgdwN2qEGrXva5KKesb2zdjNircKaUahTWJNYHjj -jHGOSY/vyGFH2HFZNYZpAoIBAQDyoMpeXBDQhAXbHpIm6p8KljqRMHU05UeRRWVR -d0RKXGYq/bUzoAhr8F2QE2+HC+2NnBGh6qR5QNO/6H6p8Du6aSXDaDNJxTErOOmY -pAkbOlcA7TjpDSrNUr4EfAXl6vUF7JB8jJHEXIqBkbGWOFYPzwLEwErQAlQN2u4e -u9HKG3Me+DP2IcrCgZ5iWvmjV4l+vXYyBEXoJqHOWEscWXHiz64c336oZqwqKe/x -s8Xy2sd6FRU/mp34wXT4kZ56/U4BV+DEN20fffBiTfMQxKmXhMykmD/O63dASCiA 
-seZrZK5mRND+aS95MqI6FMm0ToKj24RvvAWR8w50cuF7wl5zAoIBAQDeDC6ImN7K -mSLaMBaIhoZsJDdG0cJiFPRmwtepeoWt4qUWuc51LOFthhlkyGx/JbEzFMK6uYTu -hHHNOgk6ydrz1+HOzpSvN0Iz61j1hJd8Ve/0MyTBg912FPe2p3hR9dN4j5Ly+oes -QvNIr/ReW5HJhDcgXm/9oT68XyzrKM3t93XPoO4wDPSHPbRWE2dzLrNi1xg/ZyRz -ZLAtBsGPG5rVAeSEob0ytZH2H1pHfkRQ/1jSKxwb+QVMfjDd5FrEAMLA4E6J8HFz -RDHTmrveGrR1i5BJrce3VUOAuL7Y3iw6Sb+b1LyA8htxiYfBVdVfCeocDv64m0R5 -NJs6Milm9uk1AoIBAQCdQLForusG+kqBVjMLng0uY2faKjoM6n2UHhIo1tAgEfr1 -6jHDH/nVW5iIhNBICucQXRLgip/HJskXHKzbn6RWkUe0epijO3c+uEhOciKkzw8M -vrOf+LTBFtupNGjuN3ZPPJ/42XKwffoXOEKNRj4hSN5Wfvr+DkREJp0mtjymbVwT -unKTGBu+LRxmSuh5gYbP6iPtDu/wIvnEL12fJim2Azyp4gDJTKJRQZUOZqHpYPrg -mUGIU8IHM/uID3lT5VDldftrsTC8tHdUf4kGWTBB0ASCuVrB1cMYmqwFnUfmWv7d -scRy3+Gw/6w9ULPadPgfE2umr4o8qfe4aazS9YsZAoIBADZH+hQwcr5KQ0fdW5TS -dgf3rn+khYVepAR++yOWLRm9/yeYEo14hD82+fw2Nre6aiAXoibtdT6tp/hIiLsT -X3AexTe+LoDK3Gc+0Edsu2+MvpUO75xS9Q+JvqirNfGrS5/8USsO7Z3B3CFXykBK -2E/P/33tOCljgqegCKYQGo9i4Cz6pV+fuyNYhT5Jjg+NShMOjAHr3/BJm/vV2/l1 -ARuzU77MnyjHVEA7l+FET8URNxBhs4RvEsmJS77itQGXQgTOkMSNv94yvI+DEwwP -sS/PB13LmrgJou/TuevgHCW/o5Sfo9lN1kGiIkq0Be4uyUlErSZJ5qpOnufSHWbr -U0UCggEAC5WM3BXKo11Y+XphsYnpJesiB9C5HMvhnB5oCHH7ffIVqkXp2AiUnWy6 -HE+DwUWFEtRLYr4beTXn+TeunoQa7X5K1JXV41XENf5CsbQTIUnX2j7o2ilCEx9C -rDPtpUZPObqXHBiHSF67Il7GitCud+7YDAGqbJABlV3WF0MkPIfW/cxN3cb65FoI -AEV3OZiS6zvDR91++ovNV5QAmH1vljvipM7kKy5RsLFF8GYa0KNTNJ/EYojKmw00 -2OakG0pjjDcWjfdGI+i5gcHNUZwbgqx4NG/RY3YslJswBhGGlhEGuuUtpH47HTM2 -oJ/aHbXf6PdOO9MYiI/es/dfKK8ywA== +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCbzKxo4oy4aBwI +d8OU4zQAKi9a8fzVtT2q2LD1BvAweZwFTjQonqCt8IKk7NBJrbC1SRSHAzVgM6oZ +id26fIAUQwa6T8oAWQywC4rGRAOYAJoChAaBrxLq4XyCCrsUpr+m6rbwJq4DW6Sg +qTO7bOQCdJAgup6ymo2WwB2aW1b8bbvhBG1CA/2rT2l9lBejyGaHCfTK4+c5zw/A +LiAUwOyxCwm6b8Wy22Z5mEw6ovuE8gniEzJf4FAxb9cLtCJPphe69t3Tjkn4pU7o +kacuMwAvtkqSLM4LoZGOS8jKVtymkKfEslmQ4zRxt0n/MqjCSXxTu6cnqgghdvxI +hVZqmX4Tyhp5FzH7s+5Kqni9SXLsaTMrdiMyboIy12kIO3zTCariWO1lKpRFnlcU 
+pX6R3RvsqNFkCGEHEdxv8v9JykqP3lrs3Cpz9zjYAHAs0J/KFSg2tu8k5eMM1Vur +1ZZSuQE7f/anXAM5iqKn/wGHxF8kyyqulvanip73ZwiGHq6+d+1gRb94yfPdPvz9 +nhDBLOvTXxqTziVNBahC/VzmKSR/lrV4PVclchR9XxNy16LKcBMq0yi/Fv2nwLRK +1I9GA/96nuDonHBY5AGdCjxvZUgErR8a6JT7w13TfOZDHifhLO/mZETgwAYYXYoq +Z3b/sWwcRkn+9XaMaKhc0ydRvrnLTQIDAQABAoICAAkK8k+CJWXHUSfiqlJCylq0 +wsaLY2qUrZgezKKO0l5rPeAxn6W4k0iVPEKjUMboqGgqmODp5u1cV6P01GRFuU9e +a+O2IYTZ+6UKKSCvLkA3z2M+/iZadDAi102wSWx+Qy3Gfh4ROKoO5c7CE15uXbY7 +cBQeO3t6rKYnz3ANRiQPoHHZMRg9pWRjOb26seaP3qI5ieDuz8BotsLSSoq51I3F +ahKioyhpmJiNJjuEXdyOXyl6hhgk1l1liZHvKiCnr2/qx97xaXS075ALHYIjyVUm +RkzBV1I7K04a3Z7KN4i0E7C/I04ICfFhuznvMEwFOjdH8ytCW1zJ4a8PN3/8xGGo +DpgfxkpJtcTWG6KLIA7yR6o0k4qhiBHu7Tki2WuCUOYI+L7hcKx3IV3JUWLd2djd +ic2kETnu/ARl1WhDwq/GJSmCoMf1svJGPyMimOpd0xW39DrIB02CHymLe64GsXES +bk+Tma2Cwe8vRmSgB832h5x81u32S9POdWQMCRePaIS84Lay8ICrw47a00itTMhI +BzHqXEOqn4kiLlLeKiIHW8btDSQH/YAHE6Yavzn28lmvo2Ad0VGxhSOo4X2KKvNo +MUdLpoDdJvwz1Mt4XLGqtOzJe15I3lvHgc9D0Ea7O82FUvfm6t4KHML4o9QRZ3P6 +EQQlOxiLaF/V07bM2bfXAoIBAQDaORYMakf3mybMC1JypVGIW/bjeOvPJyuKqjLR +emdDyXmZ8m1opJLMT/wBTLCFuVFIgjKH+UW8S8utymPIh70RQ2fVh4HFhays9qgu +FQQGq1QVeoCcKeF8yZjvCDSDtT9y3hHPDpFAv/q7RvrzpySQgptsEeZ8XrScqFqO +5ukZQg7Q9eEQY6djbyLDPoT3bRBTmtMJc2PQM/w7IBnmmpUMh+qpg7/0WDeIP4ur +hkMB5Var0X3WPJnomvfbrq0nc9o7FZ1n5Lb7vwIYDIMQY+1G6lMPzf2QDYJxj/ns +1Mw56AO8vy3YpDQktUuYMXrOSO7ieyeiE+PP/pIB2eJB3OBPAoIBAQC2xS/eb09S +v7Q235rc6M/giqErI+s5M8iQkAh6pr6o/gtaUyFWN9neL9GVAcgWcr6X42NnBP3b +XJ116WRRlS4d6bMvjty3JAXWgOgcnFGwZMzM3jKwk0bSJhQ0A5/FMADcSeGeDisD +VfSOLiga7vxhuDEyt5vbX/D6jLBoGPryJyzLrPwawRApoTffOFdWLvuc0E3nsLs9 +KGC2AL13PwgEmXYlXrEYMCQD9ptPJxp4a9CWZ653/WCFvtwvdYy8nKjkyRiZnFgu +JpWYx1Q+iUGUt0P9J22LYMi2s13eOmEnwirWPaNuscTWwR+MVQ0wV0ajMb9WwbDa +BHxdgjOnOjejAoIBABnShXxkmy1+i4G7mT79tv8murlCFs1Fek6HQ0osbMnFroD8 +AdxPFRveHxjcRUsdmbO1zFrwsULNyUVAXLxe641+Z2wKA94mqj8xVMdXL0nGvkVo +YI2aGxeUF2e5ldU7/k0OYBcbRy7dSMMebBWm4pnDCrcoCxkOb9rVxRmB03Vsrqol +XkN9N1J51Sg7XqqUmkj6WhyVLet2K1Onwcja4+Y+sxLqBy3XvoJ92n4qfgVcaPwd 
+5wER3Lh8lfXF3rF7cQhqz1x64Pg7KvlDicLHwE0S709Adc+0+YmMmuteZug3PzH3 +gWpQS+dIGLDisgB0+ueL4S25osx6+DYF3VMj2tsCggEAEnTTIqkm33pQXoIyB0DT +TR9yqwKTjFE4XbDNymTwN+x/hP2EDkrRi2hUx7FIXUv1By2FKjAM2ov9spyA7uQb +phWlbJYGn8/ZbxHRXqJCdYeYTs/ZWo9kxW/m9mGvTqBMVfJ4ABf6K/oQGhN7JU0g +21VYUoDMElkb4pE2dyyemrluAptTUfhRA4MuOzJbT26rJei7FTx5i/F54qcsetA4 +pQ5CKf/n7kNeP7A1esa+G473n1iU+7TwfFELtxctwRYo0AGmpExvcymeTbFDRpVh +s/zVtsi2fS6m1hxCzGAk03j+DmhnCpSVBgK2httH26vUjEJHyiEBFOMDFAZD5Q0x +9QKCAQEAmB0BzXXtFgM2o3t9Gw6YTcWm9fXXSQuT1UqVOw1Gbd8q1RHSFzRYL8/f +odWSrvPoZpedToqxXTcNpcWzuKQOxKZMlcUcI5U749hSSBCUnq75MxoVTgy+hkYj +/Ijng9sqkWZxm6m3132BvFmw7KfVSzYUMlPW5tXAnXPouzvLHcTBU8tgC5yKEKpb +4FAnkD9VGHOxgu4RKkuXQ0hklPo1F58qO8J/D0s20mph9x8WFG2icPfJvQj6Ltvc +R/NzlbwUVwstqboRIy9fTn8qMbIpWwPqirCRuI039VX/pUNDJD8AIZ6/KP5D8HR5 +IkAOLnJJ4xiPtupTQCtNpq/469PsWA== -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/web3signer/key.p12 b/testing/web3signer_tests/tls/web3signer/key.p12 index 792dc197f8..c6c2da8461 100644 Binary files a/testing/web3signer_tests/tls/web3signer/key.p12 and b/testing/web3signer_tests/tls/web3signer/key.p12 differ diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt index c4722fe587..86d61fba75 100644 --- a/testing/web3signer_tests/tls/web3signer/known_clients.txt +++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt @@ -1 +1 @@ -lighthouse 02:D0:A8:C0:6A:59:90:40:54:67:D4:BD:AE:5A:D4:F5:14:A9:79:38:98:E0:62:93:C1:77:13:FC:B4:60:65:CE +lighthouse 49:99:C9:A4:05:4C:EC:BE:FD:0B:C3:C3:C1:2F:A4:D3:AB:70:96:47:51:F5:5B:3B:37:65:31:56:18:B7:B8:AD diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 4c338e91b9..504d96ae1c 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "validator_client" version = "0.3.5" -authors = ["Paul Hauner ", "Age Manning ", "Luke Anderson "] +authors = ["Sigma Prime "] edition = { 
workspace = true } [lib] @@ -12,52 +12,32 @@ path = "src/lib.rs" tokio = { workspace = true } [dependencies] -tree_hash = { workspace = true } -clap = { workspace = true } -slashing_protection = { workspace = true } -slot_clock = { workspace = true } -types = { workspace = true } -safe_arith = { workspace = true } -serde = { workspace = true } -bincode = { workspace = true } -serde_json = { workspace = true } -slog = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -futures = { workspace = true } -dirs = { workspace = true } -directory = { workspace = true } -lockfile = { workspace = true } -environment = { workspace = true } -parking_lot = { workspace = true } -filesystem = { workspace = true } -hex = { workspace = true } -deposit_contract = { workspace = true } -bls = { workspace = true } -eth2 = { workspace = true } -tempfile = { workspace = true } -validator_dir = { workspace = true } -clap_utils = { workspace = true } -eth2_keystore = { workspace = true } account_utils = { workspace = true } -lighthouse_version = { workspace = true } -warp_utils = { workspace = true } -warp = { workspace = true } -hyper = { workspace = true } -ethereum_serde_utils = { workspace = true } -libsecp256k1 = { workspace = true } -ring = { workspace = true } -rand = { workspace = true, features = ["small_rng"] } -lighthouse_metrics = { workspace = true } -monitoring_api = { workspace = true } -sensitive_url = { workspace = true } -task_executor = { workspace = true } -reqwest = { workspace = true, features = ["native-tls"] } -url = { workspace = true } -malloc_utils = { workspace = true } -sysinfo = { workspace = true } -system_health = { path = "../common/system_health" } -logging = { workspace = true } -strum = { workspace = true } -itertools = { workspace = true } +beacon_node_fallback = { workspace = true } +clap = { workspace = true } +clap_utils = { workspace = true } +directory = { workspace = true } +dirs = { workspace = true } 
+doppelganger_service = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } fdlimit = "0.3.0" +graffiti_file = { workspace = true } +hyper = { workspace = true } +initialized_validators = { workspace = true } +metrics = { workspace = true } +monitoring_api = { workspace = true } +parking_lot = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +slashing_protection = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +validator_http_api = { workspace = true } +validator_http_metrics = { workspace = true } +validator_metrics = { workspace = true } +validator_services = { workspace = true } +validator_store = { workspace = true } diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml new file mode 100644 index 0000000000..c15ded43d7 --- /dev/null +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "beacon_node_fallback" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[lib] +name = "beacon_node_fallback" +path = "src/lib.rs" + +[dependencies] +environment = { workspace = true } +eth2 = { workspace = true } +futures = { workspace = true } +itertools = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +strum = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +validator_metrics = { workspace = true } diff --git a/validator_client/src/beacon_node_health.rs b/validator_client/beacon_node_fallback/src/beacon_node_health.rs similarity index 95% rename from validator_client/src/beacon_node_health.rs rename to validator_client/beacon_node_fallback/src/beacon_node_health.rs index 1783bb312c..e5b0487656 100644 --- a/validator_client/src/beacon_node_health.rs +++ 
b/validator_client/beacon_node_fallback/src/beacon_node_health.rs @@ -1,5 +1,8 @@ +use super::CandidateError; +use eth2::BeaconNodeHttpClient; use itertools::Itertools; use serde::{Deserialize, Serialize}; +use slog::{warn, Logger}; use std::cmp::Ordering; use std::fmt::{Debug, Display, Formatter}; use std::str::FromStr; @@ -285,6 +288,30 @@ impl BeaconNodeHealth { } } +pub async fn check_node_health( + beacon_node: &BeaconNodeHttpClient, + log: &Logger, +) -> Result<(Slot, bool, bool), CandidateError> { + let resp = match beacon_node.get_node_syncing().await { + Ok(resp) => resp, + Err(e) => { + warn!( + log, + "Unable connect to beacon node"; + "error" => %e + ); + + return Err(CandidateError::Offline); + } + }; + + Ok(( + resp.data.head_slot, + resp.data.is_optimistic, + resp.data.el_offline, + )) +} + #[cfg(test)] mod tests { use super::ExecutionEngineHealth::{Healthy, Unhealthy}; @@ -292,7 +319,7 @@ mod tests { BeaconNodeHealth, BeaconNodeHealthTier, BeaconNodeSyncDistanceTiers, IsOptimistic, SyncDistanceTier, }; - use crate::beacon_node_fallback::Config; + use crate::Config; use std::str::FromStr; use types::Slot; diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/beacon_node_fallback/src/lib.rs similarity index 99% rename from validator_client/src/beacon_node_fallback.rs rename to validator_client/beacon_node_fallback/src/lib.rs index e5fe419983..95a221f189 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -2,12 +2,11 @@ //! "fallback" behaviour; it will try a request on all of the nodes until one or none of them //! succeed. 
-use crate::beacon_node_health::{ - BeaconNodeHealth, BeaconNodeSyncDistanceTiers, ExecutionEngineHealth, IsOptimistic, - SyncDistanceTier, +pub mod beacon_node_health; +use beacon_node_health::{ + check_node_health, BeaconNodeHealth, BeaconNodeSyncDistanceTiers, ExecutionEngineHealth, + IsOptimistic, SyncDistanceTier, }; -use crate::check_synced::check_node_health; -use crate::http_metrics::metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_REQUESTS}; use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use futures::future; @@ -24,6 +23,7 @@ use std::time::{Duration, Instant}; use strum::{EnumString, EnumVariantNames}; use tokio::{sync::RwLock, time::sleep}; use types::{ChainSpec, Config as ConfigSpec, EthSpec, Slot}; +use validator_metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_REQUESTS}; /// Message emitted when the VC detects the BN is using a different spec. const UPDATE_REQUIRED_LOG_HINT: &str = "this VC or the remote BN may need updating"; @@ -739,7 +739,7 @@ impl ApiTopic { mod tests { use super::*; use crate::beacon_node_health::BeaconNodeHealthTier; - use crate::SensitiveUrl; + use eth2::SensitiveUrl; use eth2::Timeouts; use std::str::FromStr; use strum::VariantNames; diff --git a/validator_client/doppelganger_service/Cargo.toml b/validator_client/doppelganger_service/Cargo.toml new file mode 100644 index 0000000000..66b61a411b --- /dev/null +++ b/validator_client/doppelganger_service/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "doppelganger_service" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +beacon_node_fallback = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +parking_lot = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } + +[dev-dependencies] +futures = { workspace = true } +logging = { workspace = true } 
diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/doppelganger_service/src/lib.rs similarity index 98% rename from validator_client/src/doppelganger_service.rs rename to validator_client/doppelganger_service/src/lib.rs index 1d552cc5ad..35228fe354 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/doppelganger_service/src/lib.rs @@ -29,8 +29,7 @@ //! //! Doppelganger protection is a best-effort, last-line-of-defence mitigation. Do not rely upon it. -use crate::beacon_node_fallback::BeaconNodeFallback; -use crate::validator_store::ValidatorStore; +use beacon_node_fallback::BeaconNodeFallback; use environment::RuntimeContext; use eth2::types::LivenessResponseData; use parking_lot::RwLock; @@ -114,6 +113,13 @@ struct LivenessResponses { /// validators on the network. pub const DEFAULT_REMAINING_DETECTION_EPOCHS: u64 = 1; +/// This crate cannot depend on ValidatorStore as validator_store depends on this crate and +/// initialises the doppelganger protection. For this reason, we abstract the validator store +/// functions this service needs through the following trait +pub trait DoppelgangerValidatorStore { + fn get_validator_index(&self, pubkey: &PublicKeyBytes) -> Option; +} + /// Store the per-validator status of doppelganger checking. #[derive(Debug, PartialEq)] pub struct DoppelgangerState { @@ -280,15 +286,20 @@ impl DoppelgangerService { /// Starts a reoccurring future which will try to keep the doppelganger service updated each /// slot. - pub fn start_update_service( + pub fn start_update_service( service: Arc, context: RuntimeContext, - validator_store: Arc>, + validator_store: Arc, beacon_nodes: Arc>, slot_clock: T, - ) -> Result<(), String> { + ) -> Result<(), String> + where + E: EthSpec, + T: 'static + SlotClock, + V: DoppelgangerValidatorStore + Send + Sync + 'static, + { // Define the `get_index` function as one that uses the validator store. 
- let get_index = move |pubkey| validator_store.validator_index(&pubkey); + let get_index = move |pubkey| validator_store.get_validator_index(&pubkey); // Define the `get_liveness` function as one that queries the beacon node API. let log = service.log.clone(); diff --git a/validator_client/graffiti_file/Cargo.toml b/validator_client/graffiti_file/Cargo.toml new file mode 100644 index 0000000000..8868f5aec8 --- /dev/null +++ b/validator_client/graffiti_file/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "graffiti_file" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[lib] +name = "graffiti_file" +path = "src/lib.rs" + +[dependencies] +bls = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +types = { workspace = true } + +[dev-dependencies] +hex = { workspace = true } +tempfile = { workspace = true } diff --git a/validator_client/src/graffiti_file.rs b/validator_client/graffiti_file/src/lib.rs similarity index 66% rename from validator_client/src/graffiti_file.rs rename to validator_client/graffiti_file/src/lib.rs index 29da3dca5a..9dab2e7827 100644 --- a/validator_client/src/graffiti_file.rs +++ b/validator_client/graffiti_file/src/lib.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use slog::warn; use std::collections::HashMap; use std::fs::File; use std::io::{prelude::*, BufReader}; @@ -65,6 +66,9 @@ impl GraffitiFile { for line in lines { let line = line.map_err(|e| Error::InvalidLine(e.to_string()))?; + if line.trim().is_empty() { + continue; + } let (pk_opt, graffiti) = read_line(&line)?; match pk_opt { Some(pk) => { @@ -100,6 +104,27 @@ fn read_line(line: &str) -> Result<(Option, Graffiti), Error> { } } +// Given the various graffiti control methods, determine the graffiti that will be used for +// the next block produced by the validator with the given public key. 
+pub fn determine_graffiti( + validator_pubkey: &PublicKeyBytes, + log: &slog::Logger, + graffiti_file: Option, + validator_definition_graffiti: Option, + graffiti_flag: Option, +) -> Option { + graffiti_file + .and_then(|mut g| match g.load_graffiti(validator_pubkey) { + Ok(g) => g, + Err(e) => { + warn!(log, "Failed to read graffiti file"; "error" => ?e); + None + } + }) + .or(validator_definition_graffiti) + .or(graffiti_flag) +} + #[cfg(test)] mod tests { use super::*; @@ -111,9 +136,15 @@ mod tests { const CUSTOM_GRAFFITI1: &str = "custom-graffiti1"; const CUSTOM_GRAFFITI2: &str = "graffitiwall:720:641:#ffff00"; const EMPTY_GRAFFITI: &str = ""; + // Newline test cases + const CUSTOM_GRAFFITI4: &str = "newlines-tests"; + const PK1: &str = "0x800012708dc03f611751aad7a43a082142832b5c1aceed07ff9b543cf836381861352aa923c70eeb02018b638aa306aa"; const PK2: &str = "0x80001866ce324de7d80ec73be15e2d064dcf121adf1b34a0d679f2b9ecbab40ce021e03bb877e1a2fe72eaaf475e6e21"; const PK3: &str = "0x9035d41a8bc11b08c17d0d93d876087958c9d055afe86fce558e3b988d92434769c8d50b0b463708db80c6aae1160c02"; + const PK4: &str = "0x8c0fca2cc70f44188a4b79e5623ac85898f1df479e14a1f4ebb615907810b6fb939c3fb4ba2081b7a5b6e33dc73621d2"; + const PK5: &str = "0x87998b0ea4a8826f03d1985e5a5ce7235bd3a56fb7559b02a55b737f4ebc69b0bf35444de5cf2680cb7eb2283eb62050"; + const PK6: &str = "0xa2af9b128255568e2ee5c42af118cc4301198123d210dbdbf2ca7ec0222f8d491f308e85076b09a2f44a75875cd6fa0f"; // Create a graffiti file in the required format and return a path to the file. 
fn create_graffiti_file() -> PathBuf { @@ -121,6 +152,9 @@ mod tests { let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); let pk3 = PublicKeyBytes::deserialize(&hex::decode(&PK3[2..]).unwrap()).unwrap(); + let pk4 = PublicKeyBytes::deserialize(&hex::decode(&PK4[2..]).unwrap()).unwrap(); + let pk5 = PublicKeyBytes::deserialize(&hex::decode(&PK5[2..]).unwrap()).unwrap(); + let pk6 = PublicKeyBytes::deserialize(&hex::decode(&PK6[2..]).unwrap()).unwrap(); let file_name = temp.into_path().join("graffiti.txt"); @@ -138,6 +172,29 @@ mod tests { graffiti_file .write_all(format!("{}:{}\n", pk3.as_hex_string(), EMPTY_GRAFFITI).as_bytes()) .unwrap(); + + // Test Lines with leading newlines - these empty lines will be skipped + graffiti_file.write_all(b"\n").unwrap(); + graffiti_file.write_all(b" \n").unwrap(); + graffiti_file + .write_all(format!("{}: {}\n", pk4.as_hex_string(), CUSTOM_GRAFFITI4).as_bytes()) + .unwrap(); + + // Test Empty lines between entries - these will be skipped + graffiti_file.write_all(b"\n").unwrap(); + graffiti_file.write_all(b" \n").unwrap(); + graffiti_file.write_all(b"\t\n").unwrap(); + graffiti_file + .write_all(format!("{}: {}\n", pk5.as_hex_string(), CUSTOM_GRAFFITI4).as_bytes()) + .unwrap(); + + // Test Trailing empty lines - these will be skipped + graffiti_file + .write_all(format!("{}: {}\n", pk6.as_hex_string(), CUSTOM_GRAFFITI4).as_bytes()) + .unwrap(); + graffiti_file.write_all(b"\n").unwrap(); + graffiti_file.write_all(b" \n").unwrap(); + graffiti_file.flush().unwrap(); file_name } @@ -150,6 +207,9 @@ mod tests { let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); let pk3 = PublicKeyBytes::deserialize(&hex::decode(&PK3[2..]).unwrap()).unwrap(); + let pk4 = 
PublicKeyBytes::deserialize(&hex::decode(&PK4[2..]).unwrap()).unwrap(); + let pk5 = PublicKeyBytes::deserialize(&hex::decode(&PK5[2..]).unwrap()).unwrap(); + let pk6 = PublicKeyBytes::deserialize(&hex::decode(&PK6[2..]).unwrap()).unwrap(); // Read once gf.read_graffiti_file().unwrap(); @@ -168,6 +228,20 @@ mod tests { GraffitiString::from_str(EMPTY_GRAFFITI).unwrap().into() ); + // Test newline cases - all empty lines should be skipped + assert_eq!( + gf.load_graffiti(&pk4).unwrap().unwrap(), + GraffitiString::from_str(CUSTOM_GRAFFITI4).unwrap().into() + ); + assert_eq!( + gf.load_graffiti(&pk5).unwrap().unwrap(), + GraffitiString::from_str(CUSTOM_GRAFFITI4).unwrap().into() + ); + assert_eq!( + gf.load_graffiti(&pk6).unwrap().unwrap(), + GraffitiString::from_str(CUSTOM_GRAFFITI4).unwrap().into() + ); + // Random pk should return the default graffiti let random_pk = Keypair::random().pk.compress(); assert_eq!( diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml new file mode 100644 index 0000000000..76a021ab8c --- /dev/null +++ b/validator_client/http_api/Cargo.toml @@ -0,0 +1,53 @@ +[package] +name = "validator_http_api" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[lib] +name = "validator_http_api" +path = "src/lib.rs" + +[dependencies] +account_utils = { workspace = true } +beacon_node_fallback = { workspace = true } +bls = { workspace = true } +deposit_contract = { workspace = true } +directory = { workspace = true } +dirs = { workspace = true } +doppelganger_service = { workspace = true } +eth2 = { workspace = true } +eth2_keystore = { workspace = true } +ethereum_serde_utils = { workspace = true } +filesystem = { workspace = true } +graffiti_file = { workspace = true } +initialized_validators = { workspace = true } +lighthouse_version = { workspace = true } +logging = { workspace = true } +parking_lot = { workspace = true } +rand = { workspace = true } +sensitive_url = { workspace = true 
} +serde = { workspace = true } +signing_method = { workspace = true } +slashing_protection = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +sysinfo = { workspace = true } +system_health = { workspace = true } +task_executor = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +types = { workspace = true } +url = { workspace = true } +validator_dir = { workspace = true } +validator_services = { workspace = true } +validator_store = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } +zeroize = { workspace = true } + +[dev-dependencies] +futures = { workspace = true } +itertools = { workspace = true } +rand = { workspace = true, features = ["small_rng"] } diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/http_api/src/api_secret.rs similarity index 74% rename from validator_client/src/http_api/api_secret.rs rename to validator_client/http_api/src/api_secret.rs index afcac477ec..bac54dc8b2 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/http_api/src/api_secret.rs @@ -5,7 +5,7 @@ use std::fs; use std::path::{Path, PathBuf}; use warp::Filter; -/// The name of the file which stores the API token. +/// The default name of the file which stores the API token. pub const PK_FILENAME: &str = "api-token.txt"; pub const PK_LEN: usize = 33; @@ -31,14 +31,32 @@ pub struct ApiSecret { impl ApiSecret { /// If the public key is already on-disk, use it. /// - /// The provided `dir` is a directory containing `PK_FILENAME`. + /// The provided `pk_path` is a path containing API token. /// /// If the public key file is missing on disk, create a new key and /// write it to disk (over-writing any existing files). 
- pub fn create_or_open>(dir: P) -> Result { - let pk_path = dir.as_ref().join(PK_FILENAME); + pub fn create_or_open>(pk_path: P) -> Result { + let pk_path = pk_path.as_ref(); + + // Check if the path is a directory + if pk_path.is_dir() { + return Err(format!( + "API token path {:?} is a directory, not a file", + pk_path + )); + } if !pk_path.exists() { + // Create parent directories if they don't exist + if let Some(parent) = pk_path.parent() { + std::fs::create_dir_all(parent).map_err(|e| { + format!( + "Unable to create parent directories for {:?}: {:?}", + pk_path, e + ) + })?; + } + let length = PK_LEN; let pk: String = thread_rng() .sample_iter(&Alphanumeric) @@ -47,7 +65,7 @@ impl ApiSecret { .collect(); // Create and write the public key to file with appropriate permissions - create_with_600_perms(&pk_path, pk.to_string().as_bytes()).map_err(|e| { + create_with_600_perms(pk_path, pk.to_string().as_bytes()).map_err(|e| { format!( "Unable to create file with permissions for {:?}: {:?}", pk_path, e @@ -55,13 +73,16 @@ impl ApiSecret { })?; } - let pk = fs::read(&pk_path) - .map_err(|e| format!("cannot read {}: {}", PK_FILENAME, e))? + let pk = fs::read(pk_path) + .map_err(|e| format!("cannot read {}: {}", pk_path.display(), e))? .iter() .map(|&c| char::from(c)) .collect(); - Ok(Self { pk, pk_path }) + Ok(Self { + pk, + pk_path: pk_path.to_path_buf(), + }) } /// Returns the API token. 
diff --git a/validator_client/src/http_api/create_signed_voluntary_exit.rs b/validator_client/http_api/src/create_signed_voluntary_exit.rs similarity index 98% rename from validator_client/src/http_api/create_signed_voluntary_exit.rs rename to validator_client/http_api/src/create_signed_voluntary_exit.rs index a9586da57e..32269b202b 100644 --- a/validator_client/src/http_api/create_signed_voluntary_exit.rs +++ b/validator_client/http_api/src/create_signed_voluntary_exit.rs @@ -1,10 +1,10 @@ -use crate::validator_store::ValidatorStore; use bls::{PublicKey, PublicKeyBytes}; use eth2::types::GenericResponse; use slog::{info, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use types::{Epoch, EthSpec, SignedVoluntaryExit, VoluntaryExit}; +use validator_store::ValidatorStore; pub async fn create_signed_voluntary_exit( pubkey: PublicKey, diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/http_api/src/create_validator.rs similarity index 97% rename from validator_client/src/http_api/create_validator.rs rename to validator_client/http_api/src/create_validator.rs index afa5d4fed1..f90a1057a4 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/http_api/src/create_validator.rs @@ -1,9 +1,8 @@ -use crate::ValidatorStore; use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; use account_utils::{ eth2_keystore::Keystore, eth2_wallet::{bip39::Mnemonic, WalletBuilder}, - random_mnemonic, random_password, ZeroizeString, + random_mnemonic, random_password, }; use eth2::lighthouse_vc::types::{self as api_types}; use slot_clock::SlotClock; @@ -11,6 +10,8 @@ use std::path::{Path, PathBuf}; use types::ChainSpec; use types::EthSpec; use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; +use validator_store::ValidatorStore; +use zeroize::Zeroizing; /// Create some validator EIP-2335 keystores and store them on disk. 
Then, enroll the validators in /// this validator client. @@ -59,7 +60,7 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, for request in validator_requests { let voting_password = random_password(); let withdrawal_password = random_password(); - let voting_password_string = ZeroizeString::from( + let voting_password_string = Zeroizing::from( String::from_utf8(voting_password.as_bytes().to_vec()).map_err(|e| { warp_utils::reject::custom_server_error(format!( "locally generated password is not utf8: {:?}", @@ -199,7 +200,7 @@ pub async fn create_validators_web3signer( pub fn get_voting_password_storage( secrets_dir: &Option, voting_keystore: &Keystore, - voting_password_string: &ZeroizeString, + voting_password_string: &Zeroizing, ) -> Result { if let Some(secrets_dir) = &secrets_dir { let password_path = keystore_password_path(secrets_dir, voting_keystore); diff --git a/validator_client/src/http_api/graffiti.rs b/validator_client/http_api/src/graffiti.rs similarity index 98% rename from validator_client/src/http_api/graffiti.rs rename to validator_client/http_api/src/graffiti.rs index 79d4fd61f3..86238a697c 100644 --- a/validator_client/src/http_api/graffiti.rs +++ b/validator_client/http_api/src/graffiti.rs @@ -1,8 +1,8 @@ -use crate::validator_store::ValidatorStore; use bls::PublicKey; use slot_clock::SlotClock; use std::sync::Arc; use types::{graffiti::GraffitiString, EthSpec, Graffiti}; +use validator_store::ValidatorStore; pub fn get_graffiti( validator_pubkey: PublicKey, diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/http_api/src/keystores.rs similarity index 92% rename from validator_client/src/http_api/keystores.rs rename to validator_client/http_api/src/keystores.rs index 074c578347..fd6b4fdae5 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/http_api/src/keystores.rs @@ -1,9 +1,5 @@ //! Implementation of the standard keystore management API. 
-use crate::{ - initialized_validators::Error, signing_method::SigningMethod, InitializedValidators, - ValidatorStore, -}; -use account_utils::{validator_definitions::PasswordStorage, ZeroizeString}; +use account_utils::validator_definitions::PasswordStorage; use eth2::lighthouse_vc::{ std_types::{ DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, @@ -13,6 +9,8 @@ use eth2::lighthouse_vc::{ types::{ExportKeystoresResponse, SingleExportKeystoresResponse}, }; use eth2_keystore::Keystore; +use initialized_validators::{Error, InitializedValidators}; +use signing_method::SigningMethod; use slog::{info, warn, Logger}; use slot_clock::SlotClock; use std::path::PathBuf; @@ -21,8 +19,10 @@ use task_executor::TaskExecutor; use tokio::runtime::Handle; use types::{EthSpec, PublicKeyBytes}; use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; +use validator_store::ValidatorStore; use warp::Rejection; use warp_utils::reject::{custom_bad_request, custom_server_error}; +use zeroize::Zeroizing; pub fn list( validator_store: Arc>, @@ -75,12 +75,6 @@ pub fn import( ))); } - info!( - log, - "Importing keystores via standard HTTP API"; - "count" => request.keystores.len(), - ); - // Import slashing protection data before keystores, so that new keystores don't start signing // without it. Do not return early on failure, propagate the failure to each key. 
let slashing_protection_status = @@ -156,12 +150,25 @@ pub fn import( statuses.push(status); } + let successful_import = statuses + .iter() + .filter(|status| matches!(status.status, ImportKeystoreStatus::Imported)) + .count(); + + if successful_import > 0 { + info!( + log, + "Imported keystores via standard HTTP API"; + "count" => successful_import, + ); + } + Ok(ImportKeystoresResponse { data: statuses }) } fn import_single_keystore( keystore: Keystore, - password: ZeroizeString, + password: Zeroizing, validator_dir_path: PathBuf, secrets_dir: Option, validator_store: &ValidatorStore, @@ -238,7 +245,23 @@ pub fn delete( task_executor: TaskExecutor, log: Logger, ) -> Result { - let export_response = export(request, validator_store, task_executor, log)?; + let export_response = export(request, validator_store, task_executor, log.clone())?; + + // Check the status is Deleted to confirm deletion is successful, then only display the log + let successful_deletion = export_response + .data + .iter() + .filter(|response| matches!(response.status.status, DeleteKeystoreStatus::Deleted)) + .count(); + + if successful_deletion > 0 { + info!( + log, + "Deleted keystore via standard HTTP API"; + "count" => successful_deletion, + ); + } + Ok(DeleteKeystoresResponse { data: export_response .data diff --git a/validator_client/src/http_api/mod.rs b/validator_client/http_api/src/lib.rs similarity index 98% rename from validator_client/src/http_api/mod.rs rename to validator_client/http_api/src/lib.rs index ded25abbcd..f3dab3780c 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/http_api/src/lib.rs @@ -7,20 +7,24 @@ mod remotekeys; mod tests; pub mod test_utils; +pub use api_secret::PK_FILENAME; -use crate::beacon_node_fallback::CandidateInfo; -use crate::http_api::graffiti::{delete_graffiti, get_graffiti, set_graffiti}; +use graffiti::{delete_graffiti, get_graffiti, set_graffiti}; + +use create_signed_voluntary_exit::create_signed_voluntary_exit; +use 
graffiti_file::{determine_graffiti, GraffitiFile}; +use validator_store::ValidatorStore; -use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit; -use crate::{determine_graffiti, BlockService, GraffitiFile, ValidatorStore}; use account_utils::{ mnemonic_from_phrase, validator_definitions::{SigningDefinition, ValidatorDefinition, Web3SignerDefinition}, }; pub use api_secret::ApiSecret; +use beacon_node_fallback::CandidateInfo; use create_validator::{ create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, }; +use directory::{DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_VALIDATOR_DIR}; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{ @@ -46,6 +50,7 @@ use task_executor::TaskExecutor; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; +use validator_services::block_service::BlockService; use warp::{sse::Event, Filter}; use warp_utils::task::blocking_json_task; @@ -96,10 +101,17 @@ pub struct Config { pub allow_origin: Option, pub allow_keystore_export: bool, pub store_passwords_in_secrets_dir: bool, + pub http_token_path: PathBuf, } impl Default for Config { fn default() -> Self { + let http_token_path = dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(DEFAULT_ROOT_DIR) + .join(DEFAULT_HARDCODED_NETWORK) + .join(DEFAULT_VALIDATOR_DIR) + .join(PK_FILENAME); Self { enabled: false, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -107,6 +119,7 @@ impl Default for Config { allow_origin: None, allow_keystore_export: false, store_passwords_in_secrets_dir: false, + http_token_path, } } } diff --git a/validator_client/src/http_api/remotekeys.rs b/validator_client/http_api/src/remotekeys.rs similarity index 98% rename from validator_client/src/http_api/remotekeys.rs rename to validator_client/http_api/src/remotekeys.rs index 
053bbcb4b2..289be57182 100644 --- a/validator_client/src/http_api/remotekeys.rs +++ b/validator_client/http_api/src/remotekeys.rs @@ -1,5 +1,4 @@ //! Implementation of the standard remotekey management API. -use crate::{initialized_validators::Error, InitializedValidators, ValidatorStore}; use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, Web3SignerDefinition, }; @@ -8,6 +7,7 @@ use eth2::lighthouse_vc::std_types::{ ImportRemotekeyStatus, ImportRemotekeysRequest, ImportRemotekeysResponse, ListRemotekeysResponse, SingleListRemotekeysResponse, Status, }; +use initialized_validators::{Error, InitializedValidators}; use slog::{info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; @@ -15,6 +15,7 @@ use task_executor::TaskExecutor; use tokio::runtime::Handle; use types::{EthSpec, PublicKeyBytes}; use url::Url; +use validator_store::ValidatorStore; use warp::Rejection; use warp_utils::reject::custom_server_error; diff --git a/validator_client/src/http_api/test_utils.rs b/validator_client/http_api/src/test_utils.rs similarity index 95% rename from validator_client/src/http_api/test_utils.rs rename to validator_client/http_api/src/test_utils.rs index 119c611553..390095eec7 100644 --- a/validator_client/src/http_api/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -1,21 +1,19 @@ -use crate::doppelganger_service::DoppelgangerService; -use crate::key_cache::{KeyCache, CACHE_FILENAME}; -use crate::{ - http_api::{ApiSecret, Config as HttpConfig, Context}, - initialized_validators::{InitializedValidators, OnDecryptFailure}, - Config, ValidatorDefinitions, ValidatorStore, -}; +use crate::api_secret::PK_FILENAME; +use crate::{ApiSecret, Config as HttpConfig, Context}; +use account_utils::validator_definitions::ValidatorDefinitions; use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, - ZeroizeString, }; use deposit_contract::decode_eth1_tx_data; +use 
doppelganger_service::DoppelgangerService; use eth2::{ lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*}, types::ErrorMessage as ApiErrorMessage, Error as ApiError, }; use eth2_keystore::KeystoreBuilder; +use initialized_validators::key_cache::{KeyCache, CACHE_FILENAME}; +use initialized_validators::{InitializedValidators, OnDecryptFailure}; use logging::test_logger; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -29,6 +27,8 @@ use std::time::Duration; use task_executor::test_utils::TestRuntime; use tempfile::{tempdir, TempDir}; use tokio::sync::oneshot; +use validator_store::{Config as ValidatorStoreConfig, ValidatorStore}; +use zeroize::Zeroizing; pub const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); @@ -74,6 +74,7 @@ impl ApiTester { let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); + let token_path = tempdir().unwrap().path().join(PK_FILENAME); let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); @@ -86,19 +87,17 @@ impl ApiTester { .await .unwrap(); - let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_secret = ApiSecret::create_or_open(token_path).unwrap(); let api_pubkey = api_secret.api_token(); - let config = Config { - validator_dir: validator_dir.path().into(), - secrets_dir: secrets_dir.path().into(), + let config = ValidatorStoreConfig { fee_recipient: Some(TEST_DEFAULT_FEE_RECIPIENT), ..Default::default() }; let spec = Arc::new(E::default_spec()); - let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); + let slashing_db_path = validator_dir.path().join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); let slot_clock = @@ -180,6 +179,7 @@ impl ApiTester { allow_origin: None, allow_keystore_export: true, store_passwords_in_secrets_dir: false, + http_token_path: 
tempdir().unwrap().path().join(PK_FILENAME), } } @@ -202,8 +202,8 @@ impl ApiTester { } pub fn invalid_token_client(&self) -> ValidatorClientHttpClient { - let tmp = tempdir().unwrap(); - let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let tmp = tempdir().unwrap().path().join("invalid-token.txt"); + let api_secret = ApiSecret::create_or_open(tmp).unwrap(); let invalid_pubkey = api_secret.api_token(); ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey).unwrap() } @@ -324,7 +324,7 @@ impl ApiTester { .collect::>(); let (response, mnemonic) = if s.specify_mnemonic { - let mnemonic = ZeroizeString::from(random_mnemonic().phrase().to_string()); + let mnemonic = Zeroizing::from(random_mnemonic().phrase().to_string()); let request = CreateValidatorsMnemonicRequest { mnemonic: mnemonic.clone(), key_derivation_path_offset: s.key_derivation_path_offset, diff --git a/validator_client/src/http_api/tests.rs b/validator_client/http_api/src/tests.rs similarity index 95% rename from validator_client/src/http_api/tests.rs rename to validator_client/http_api/src/tests.rs index ba3b7f685b..7ea3d7ebaa 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -3,15 +3,13 @@ mod keystores; -use crate::doppelganger_service::DoppelgangerService; -use crate::{ - http_api::{ApiSecret, Config as HttpConfig, Context}, - initialized_validators::InitializedValidators, - Config, ValidatorDefinitions, ValidatorStore, -}; +use doppelganger_service::DoppelgangerService; +use initialized_validators::{Config as InitializedValidatorsConfig, InitializedValidators}; + +use crate::{ApiSecret, Config as HttpConfig, Context}; use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, - random_password_string, ZeroizeString, + random_password_string, validator_definitions::ValidatorDefinitions, }; use deposit_contract::decode_eth1_tx_data; use eth2::{ @@ -34,6 +32,8 @@ use 
std::time::Duration; use task_executor::test_utils::TestRuntime; use tempfile::{tempdir, TempDir}; use types::graffiti::GraffitiString; +use validator_store::{Config as ValidatorStoreConfig, ValidatorStore}; +use zeroize::Zeroizing; const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); @@ -47,42 +47,43 @@ struct ApiTester { url: SensitiveUrl, slot_clock: TestingSlotClock, _validator_dir: TempDir, + _secrets_dir: TempDir, _test_runtime: TestRuntime, } impl ApiTester { pub async fn new() -> Self { - let mut config = Config::default(); - config.fee_recipient = Some(TEST_DEFAULT_FEE_RECIPIENT); + let config = ValidatorStoreConfig { + fee_recipient: Some(TEST_DEFAULT_FEE_RECIPIENT), + ..Default::default() + }; Self::new_with_config(config).await } - pub async fn new_with_config(mut config: Config) -> Self { + pub async fn new_with_config(config: ValidatorStoreConfig) -> Self { let log = test_logger(); let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); + let token_path = tempdir().unwrap().path().join("api-token.txt"); let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); let initialized_validators = InitializedValidators::from_definitions( validator_defs, validator_dir.path().into(), - Config::default(), + InitializedValidatorsConfig::default(), log.clone(), ) .await .unwrap(); - let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_secret = ApiSecret::create_or_open(&token_path).unwrap(); let api_pubkey = api_secret.api_token(); - config.validator_dir = validator_dir.path().into(); - config.secrets_dir = secrets_dir.path().into(); - let spec = Arc::new(E::default_spec()); - let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); + let slashing_db_path = validator_dir.path().join(SLASHING_PROTECTION_FILENAME); let slashing_protection = 
SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); let genesis_time: u64 = 0; @@ -129,6 +130,7 @@ impl ApiTester { allow_origin: None, allow_keystore_export: true, store_passwords_in_secrets_dir: false, + http_token_path: token_path, }, sse_logging_components: None, log, @@ -139,7 +141,7 @@ impl ApiTester { let (listening_socket, server) = super::serve(ctx, test_runtime.task_executor.exit()).unwrap(); - tokio::spawn(async { server.await }); + tokio::spawn(server); let url = SensitiveUrl::parse(&format!( "http://{}:{}", @@ -157,13 +159,14 @@ impl ApiTester { url, slot_clock, _validator_dir: validator_dir, + _secrets_dir: secrets_dir, _test_runtime: test_runtime, } } pub fn invalid_token_client(&self) -> ValidatorClientHttpClient { - let tmp = tempdir().unwrap(); - let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let tmp = tempdir().unwrap().path().join("invalid-token.txt"); + let api_secret = ApiSecret::create_or_open(tmp).unwrap(); let invalid_pubkey = api_secret.api_token(); ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey.clone()).unwrap() } @@ -284,7 +287,7 @@ impl ApiTester { .collect::>(); let (response, mnemonic) = if s.specify_mnemonic { - let mnemonic = ZeroizeString::from(random_mnemonic().phrase().to_string()); + let mnemonic = Zeroizing::from(random_mnemonic().phrase().to_string()); let request = CreateValidatorsMnemonicRequest { mnemonic: mnemonic.clone(), key_derivation_path_offset: s.key_derivation_path_offset, @@ -344,22 +347,21 @@ impl ApiTester { .set_nextaccount(s.key_derivation_path_offset) .unwrap(); - for i in 0..s.count { + for validator in response.iter().take(s.count) { let keypairs = wallet .next_validator(PASSWORD_BYTES, PASSWORD_BYTES, PASSWORD_BYTES) .unwrap(); let voting_keypair = keypairs.voting.decrypt_keypair(PASSWORD_BYTES).unwrap(); assert_eq!( - response[i].voting_pubkey, + validator.voting_pubkey, voting_keypair.pk.clone().into(), "the locally generated voting pk should match the server 
response" ); let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); - let deposit_bytes = - serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); + let deposit_bytes = serde_utils::hex::decode(&validator.eth1_deposit_tx_data).unwrap(); let (deposit_data, _) = decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) @@ -1147,11 +1149,11 @@ async fn validator_builder_boost_factor() { /// `prefer_builder_proposals` and `builder_boost_factor` values. #[tokio::test] async fn validator_derived_builder_boost_factor_with_process_defaults() { - let config = Config { + let config = ValidatorStoreConfig { builder_proposals: true, prefer_builder_proposals: false, builder_boost_factor: Some(80), - ..Config::default() + ..ValidatorStoreConfig::default() }; ApiTester::new_with_config(config) .await @@ -1181,11 +1183,11 @@ async fn validator_derived_builder_boost_factor_with_process_defaults() { #[tokio::test] async fn validator_builder_boost_factor_global_builder_proposals_true() { - let config = Config { + let config = ValidatorStoreConfig { builder_proposals: true, prefer_builder_proposals: false, builder_boost_factor: None, - ..Config::default() + ..ValidatorStoreConfig::default() }; ApiTester::new_with_config(config) .await @@ -1194,11 +1196,11 @@ async fn validator_builder_boost_factor_global_builder_proposals_true() { #[tokio::test] async fn validator_builder_boost_factor_global_builder_proposals_false() { - let config = Config { + let config = ValidatorStoreConfig { builder_proposals: false, prefer_builder_proposals: false, builder_boost_factor: None, - ..Config::default() + ..ValidatorStoreConfig::default() }; ApiTester::new_with_config(config) .await @@ -1207,11 +1209,11 @@ async fn validator_builder_boost_factor_global_builder_proposals_false() { #[tokio::test] async fn validator_builder_boost_factor_global_prefer_builder_proposals_true() { - let config = Config { + let config = ValidatorStoreConfig { 
builder_proposals: true, prefer_builder_proposals: true, builder_boost_factor: None, - ..Config::default() + ..ValidatorStoreConfig::default() }; ApiTester::new_with_config(config) .await @@ -1220,11 +1222,11 @@ async fn validator_builder_boost_factor_global_prefer_builder_proposals_true() { #[tokio::test] async fn validator_builder_boost_factor_global_prefer_builder_proposals_true_override() { - let config = Config { + let config = ValidatorStoreConfig { builder_proposals: false, prefer_builder_proposals: true, builder_boost_factor: None, - ..Config::default() + ..ValidatorStoreConfig::default() }; ApiTester::new_with_config(config) .await diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs similarity index 97% rename from validator_client/src/http_api/tests/keystores.rs rename to validator_client/http_api/src/tests/keystores.rs index b6923d1c78..6559a2bb9e 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -1,4 +1,3 @@ -use super::super::super::validator_store::DEFAULT_GAS_LIMIT; use super::*; use account_utils::random_password_string; use bls::PublicKeyBytes; @@ -14,8 +13,10 @@ use slashing_protection::interchange::{Interchange, InterchangeMetadata}; use std::{collections::HashMap, path::Path}; use tokio::runtime::Handle; use types::{attestation::AttestationBase, Address}; +use validator_store::DEFAULT_GAS_LIMIT; +use zeroize::Zeroizing; -fn new_keystore(password: ZeroizeString) -> Keystore { +fn new_keystore(password: Zeroizing) -> Keystore { let keypair = Keypair::random(); Keystore( KeystoreBuilder::new(&keypair, password.as_ref(), String::new()) @@ -129,7 +130,7 @@ fn check_keystore_get_response<'a>( for (ks1, ks2) in response.data.iter().zip_eq(expected_keystores) { assert_eq!(ks1.validating_pubkey, keystore_pubkey(ks2)); assert_eq!(ks1.derivation_path, ks2.path()); - assert!(ks1.readonly == None || ks1.readonly == Some(false)); 
+ assert!(ks1.readonly.is_none() || ks1.readonly == Some(false)); } } @@ -146,7 +147,7 @@ fn check_keystore_import_response( } } -fn check_keystore_delete_response<'a>( +fn check_keystore_delete_response( response: &DeleteKeystoresResponse, expected_statuses: impl IntoIterator, ) { @@ -633,7 +634,7 @@ async fn check_get_set_fee_recipient() { assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: TEST_DEFAULT_FEE_RECIPIENT, } ); @@ -653,7 +654,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[1], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_public_key_1.clone(), + ethaddress: fee_recipient_public_key_1, }, ) .await @@ -666,14 +667,14 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_public_key_1.clone() + fee_recipient_public_key_1 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -685,7 +686,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[2], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_public_key_2.clone(), + ethaddress: fee_recipient_public_key_2, }, ) .await @@ -698,16 +699,16 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_public_key_1.clone() + fee_recipient_public_key_1 } else if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -719,7 +720,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[1], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_override.clone(), + ethaddress: fee_recipient_override, }, ) .await @@ -731,16 +732,16 @@ async 
fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_override.clone() + fee_recipient_override } else if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -760,14 +761,14 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -813,7 +814,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: DEFAULT_GAS_LIMIT, } ); @@ -842,14 +843,14 @@ async fn check_get_set_gas_limit() { .await .expect("should get gas limit"); let expected = if i == 1 { - gas_limit_public_key_1.clone() + gas_limit_public_key_1 } else { DEFAULT_GAS_LIMIT }; assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -883,7 +884,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -916,7 +917,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -943,7 +944,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -1304,7 +1305,7 @@ async fn delete_concurrent_with_signing() { let handle = handle.spawn(async move { for j in 0..num_attestations { let mut att = make_attestation(j, j + 1); - for (_validator_id, public_key) 
in thread_pubkeys.iter().enumerate() { + for public_key in thread_pubkeys.iter() { let _ = validator_store .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1)) .await; @@ -2083,7 +2084,7 @@ async fn import_remotekey_web3signer_disabled() { web3signer_req.enable = false; // Import web3signers. - let _ = tester + tester .client .post_lighthouse_validators_web3signer(&vec![web3signer_req]) .await @@ -2147,8 +2148,11 @@ async fn import_remotekey_web3signer_enabled() { // 1 validator imported. assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); - let vals = tester.initialized_validators.read(); - let web3_vals = vals.validator_definitions(); + let web3_vals = tester + .initialized_validators + .read() + .validator_definitions() + .to_vec(); // Import remotekeys. let import_res = tester @@ -2165,11 +2169,13 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); - let vals = tester.initialized_validators.read(); - let remote_vals = vals.validator_definitions(); + { + let vals = tester.initialized_validators.read(); + let remote_vals = vals.validator_definitions(); - // Web3signer should not be overwritten since it is enabled. - assert!(web3_vals == remote_vals); + // Web3signer should not be overwritten since it is enabled. + assert!(web3_vals == remote_vals); + } // Remotekey should not be imported. 
let expected_responses = vec![SingleListRemotekeysResponse { diff --git a/validator_client/http_metrics/Cargo.toml b/validator_client/http_metrics/Cargo.toml new file mode 100644 index 0000000000..c29a4d18fa --- /dev/null +++ b/validator_client/http_metrics/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "validator_http_metrics" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +lighthouse_version = { workspace = true } +malloc_utils = { workspace = true } +metrics = { workspace = true } +parking_lot = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +types = { workspace = true } +validator_metrics = { workspace = true } +validator_services = { workspace = true } +validator_store = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } diff --git a/validator_client/src/http_metrics/mod.rs b/validator_client/http_metrics/src/lib.rs similarity index 68% rename from validator_client/src/http_metrics/mod.rs rename to validator_client/http_metrics/src/lib.rs index 67cab2bdc3..984b752e5a 100644 --- a/validator_client/src/http_metrics/mod.rs +++ b/validator_client/http_metrics/src/lib.rs @@ -1,18 +1,20 @@ //! This crate provides a HTTP server that is solely dedicated to serving the `/metrics` endpoint. //! //! For other endpoints, see the `http_api` crate. 
-pub mod metrics; -use crate::{DutiesService, ValidatorStore}; use lighthouse_version::version_with_platform; +use malloc_utils::scrape_allocator_metrics; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, info, Logger}; -use slot_clock::SystemTimeSlotClock; +use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; use types::EthSpec; +use validator_services::duties_service::DutiesService; +use validator_store::ValidatorStore; use warp::{http::Response, Filter}; #[derive(Debug)] @@ -120,7 +122,7 @@ pub fn serve( .map(move || inner_ctx.clone()) .and_then(|ctx: Arc>| async move { Ok::<_, warp::Rejection>( - metrics::gather_prometheus_metrics(&ctx) + gather_prometheus_metrics(&ctx) .map(|body| { Response::builder() .status(200) @@ -156,3 +158,59 @@ pub fn serve( Ok((listening_socket, server)) } + +pub fn gather_prometheus_metrics( + ctx: &Context, +) -> std::result::Result { + use validator_metrics::*; + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + + { + let shared = ctx.shared.read(); + + if let Some(genesis_time) = shared.genesis_time { + if let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) { + let distance = now.as_secs() as i64 - genesis_time as i64; + set_gauge(&GENESIS_DISTANCE, distance); + } + } + + if let Some(duties_service) = &shared.duties_service { + if let Some(slot) = duties_service.slot_clock.now() { + let current_epoch = slot.epoch(E::slots_per_epoch()); + let next_epoch = current_epoch + 1; + + set_int_gauge( + &PROPOSER_COUNT, + &[CURRENT_EPOCH], + duties_service.proposer_count(current_epoch) as i64, + ); + set_int_gauge( + &ATTESTER_COUNT, + &[CURRENT_EPOCH], + duties_service.attester_count(current_epoch) as i64, + ); + set_int_gauge( + &ATTESTER_COUNT, + &[NEXT_EPOCH], + duties_service.attester_count(next_epoch) as i64, + ); + } + } + } + + // It's important to ensure 
these metrics are explicitly enabled in the case that users aren't + // using glibc and this function causes panics. + if ctx.config.allocator_metrics_enabled { + scrape_allocator_metrics(); + } + + warp_utils::metrics::scrape_health_metrics(); + + encoder + .encode(&metrics::gather(), &mut buffer) + .map_err(|e| format!("{e:?}"))?; + + String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) +} diff --git a/validator_client/initialized_validators/Cargo.toml b/validator_client/initialized_validators/Cargo.toml new file mode 100644 index 0000000000..05e85261f9 --- /dev/null +++ b/validator_client/initialized_validators/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "initialized_validators" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +account_utils = { workspace = true } +bincode = { workspace = true } +bls = { workspace = true } +eth2_keystore = { workspace = true } +filesystem = { workspace = true } +lockfile = { workspace = true } +metrics = { workspace = true } +parking_lot = { workspace = true } +rand = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +signing_method = { workspace = true } +slog = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +url = { workspace = true } +validator_dir = { workspace = true } +validator_metrics = { workspace = true } +zeroize = { workspace = true } diff --git a/validator_client/src/key_cache.rs b/validator_client/initialized_validators/src/key_cache.rs similarity index 100% rename from validator_client/src/key_cache.rs rename to validator_client/initialized_validators/src/key_cache.rs diff --git a/validator_client/src/initialized_validators.rs b/validator_client/initialized_validators/src/lib.rs similarity index 97% rename from validator_client/src/initialized_validators.rs rename to validator_client/initialized_validators/src/lib.rs index 
c94115e5ec..bd64091dae 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/initialized_validators/src/lib.rs @@ -6,20 +6,22 @@ //! The `InitializedValidators` struct in this file serves as the source-of-truth of which //! validators are managed by this validator client. -use crate::signing_method::SigningMethod; +pub mod key_cache; + use account_utils::{ read_password, read_password_from_user, read_password_string, validator_definitions::{ self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, CONFIG_FILENAME, }, - ZeroizeString, }; use eth2_keystore::Keystore; -use lighthouse_metrics::set_gauge; use lockfile::{Lockfile, LockfileError}; +use metrics::set_gauge; use parking_lot::{MappedMutexGuard, Mutex, MutexGuard}; use reqwest::{Certificate, Client, Error as ReqwestError, Identity}; +use serde::{Deserialize, Serialize}; +use signing_method::SigningMethod; use slog::{debug, error, info, warn, Logger}; use std::collections::{HashMap, HashSet}; use std::fs::{self, File}; @@ -31,10 +33,9 @@ use types::graffiti::GraffitiString; use types::{Address, Graffiti, Keypair, PublicKey, PublicKeyBytes}; use url::{ParseError, Url}; use validator_dir::Builder as ValidatorDirBuilder; +use zeroize::Zeroizing; -use crate::key_cache; -use crate::key_cache::KeyCache; -use crate::Config; +use key_cache::KeyCache; /// Default timeout for a request to a remote signer for a signature. /// @@ -45,6 +46,24 @@ const DEFAULT_REMOTE_SIGNER_REQUEST_TIMEOUT: Duration = Duration::from_secs(12); // Use TTY instead of stdin to capture passwords from users. const USE_STDIN: bool = false; +pub const DEFAULT_WEB3SIGNER_KEEP_ALIVE: Option = Some(Duration::from_secs(20)); + +// The configuration for initialised validators. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Config { + pub web3_signer_keep_alive_timeout: Option, + pub web3_signer_max_idle_connections: Option, +} + +impl Default for Config { + fn default() -> Self { + Config { + web3_signer_keep_alive_timeout: DEFAULT_WEB3SIGNER_KEEP_ALIVE, + web3_signer_max_idle_connections: None, + } + } +} + pub enum OnDecryptFailure { /// If the key cache fails to decrypt, create a new cache. CreateNew, @@ -55,7 +74,7 @@ pub enum OnDecryptFailure { pub struct KeystoreAndPassword { pub keystore: Keystore, - pub password: Option, + pub password: Option>, } #[derive(Debug)] @@ -243,7 +262,7 @@ impl InitializedValidator { // If the password is supplied, use it and ignore the path // (if supplied). (_, Some(password)) => ( - password.as_ref().to_vec().into(), + password.as_bytes().to_vec().into(), keystore .decrypt_keypair(password.as_ref()) .map_err(Error::UnableToDecryptKeystore)?, @@ -263,7 +282,7 @@ impl InitializedValidator { &keystore, &keystore_path, )?; - (password.as_ref().to_vec().into(), keypair) + (password.as_bytes().to_vec().into(), keypair) } }, ) @@ -436,7 +455,7 @@ fn build_web3_signer_client( fn unlock_keystore_via_stdin_password( keystore: &Keystore, keystore_path: &Path, -) -> Result<(ZeroizeString, Keypair), Error> { +) -> Result<(Zeroizing, Keypair), Error> { eprintln!(); eprintln!( "The {} file does not contain either of the following fields for {:?}:", @@ -1153,14 +1172,14 @@ impl InitializedValidators { voting_keystore_path, } => { let pw = if let Some(p) = voting_keystore_password { - p.as_ref().to_vec().into() + p.as_bytes().to_vec().into() } else if let Some(path) = voting_keystore_password_path { read_password(path).map_err(Error::UnableToReadVotingKeystorePassword)? } else { let keystore = open_keystore(voting_keystore_path)?; unlock_keystore_via_stdin_password(&keystore, voting_keystore_path)? 
.0 - .as_ref() + .as_bytes() .to_vec() .into() }; @@ -1194,7 +1213,7 @@ impl InitializedValidators { /// A validator is considered "already known" and skipped if the public key is already known. /// I.e., if there are two different definitions with the same public key then the second will /// be ignored. - pub(crate) async fn update_validators(&mut self) -> Result<(), Error> { + pub async fn update_validators(&mut self) -> Result<(), Error> { //use key cache if available let mut key_stores = HashMap::new(); @@ -1380,11 +1399,11 @@ impl InitializedValidators { // Update the enabled and total validator counts set_gauge( - &crate::http_metrics::metrics::ENABLED_VALIDATORS_COUNT, + &validator_metrics::ENABLED_VALIDATORS_COUNT, self.num_enabled() as i64, ); set_gauge( - &crate::http_metrics::metrics::TOTAL_VALIDATORS_COUNT, + &validator_metrics::TOTAL_VALIDATORS_COUNT, self.num_total() as i64, ); Ok(()) @@ -1406,7 +1425,7 @@ impl InitializedValidators { /// This should only be used for testing, it's rather destructive. 
pub fn delete_passwords_from_validator_definitions( &mut self, - ) -> Result, Error> { + ) -> Result>, Error> { let mut passwords = HashMap::default(); for def in self.definitions.as_mut_slice() { diff --git a/validator_client/signing_method/Cargo.toml b/validator_client/signing_method/Cargo.toml new file mode 100644 index 0000000000..3e1a48142f --- /dev/null +++ b/validator_client/signing_method/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "signing_method" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +eth2_keystore = { workspace = true } +ethereum_serde_utils = { workspace = true } +lockfile = { workspace = true } +parking_lot = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +task_executor = { workspace = true } +types = { workspace = true } +url = { workspace = true } +validator_metrics = { workspace = true } diff --git a/validator_client/src/signing_method.rs b/validator_client/signing_method/src/lib.rs similarity index 95% rename from validator_client/src/signing_method.rs rename to validator_client/signing_method/src/lib.rs index d89c9b8229..f3b62c9500 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/signing_method/src/lib.rs @@ -3,7 +3,6 @@ //! - Via a local `Keypair`. //! - Via a remote signer (Web3Signer) -use crate::http_metrics::metrics; use eth2_keystore::Keystore; use lockfile::Lockfile; use parking_lot::Mutex; @@ -50,7 +49,7 @@ pub enum SignableMessage<'a, E: EthSpec, Payload: AbstractExecPayload = FullP VoluntaryExit(&'a VoluntaryExit), } -impl<'a, E: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, E, Payload> { +impl> SignableMessage<'_, E, Payload> { /// Returns the `SignedRoot` for the contained message. /// /// The actual `SignedRoot` trait is not used since it also requires a `TreeHash` impl, which is @@ -166,8 +165,10 @@ impl SigningMethod { ) -> Result { match self { SigningMethod::LocalKeystore { voting_keypair, .. 
} => { - let _timer = - metrics::start_timer_vec(&metrics::SIGNING_TIMES, &[metrics::LOCAL_KEYSTORE]); + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::SIGNING_TIMES, + &[validator_metrics::LOCAL_KEYSTORE], + ); let voting_keypair = voting_keypair.clone(); // Spawn a blocking task to produce the signature. This avoids blocking the core @@ -187,8 +188,10 @@ impl SigningMethod { http_client, .. } => { - let _timer = - metrics::start_timer_vec(&metrics::SIGNING_TIMES, &[metrics::WEB3SIGNER]); + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::SIGNING_TIMES, + &[validator_metrics::WEB3SIGNER], + ); // Map the message into a Web3Signer type. let object = match signable_message { diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/signing_method/src/web3signer.rs similarity index 100% rename from validator_client/src/signing_method/web3signer.rs rename to validator_client/signing_method/src/web3signer.rs diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 6982958bd5..1a098742d8 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -10,16 +10,16 @@ name = "slashing_protection_tests" path = "tests/main.rs" [dependencies] -tempfile = { workspace = true } -types = { workspace = true } -rusqlite = { workspace = true } -r2d2 = { workspace = true } -r2d2_sqlite = "0.21.0" -serde = { workspace = true } -serde_json = { workspace = true } +arbitrary = { workspace = true, features = ["derive"] } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } -arbitrary = { workspace = true, features = ["derive"] } +r2d2 = { workspace = true } +r2d2_sqlite = "0.21.0" +rusqlite = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +tempfile = { workspace = true } +types = { workspace = true } [dev-dependencies] rayon = { workspace = 
true } diff --git a/validator_client/src/check_synced.rs b/validator_client/src/check_synced.rs deleted file mode 100644 index 2e9a62ff65..0000000000 --- a/validator_client/src/check_synced.rs +++ /dev/null @@ -1,27 +0,0 @@ -use crate::beacon_node_fallback::CandidateError; -use eth2::{types::Slot, BeaconNodeHttpClient}; -use slog::{warn, Logger}; - -pub async fn check_node_health( - beacon_node: &BeaconNodeHttpClient, - log: &Logger, -) -> Result<(Slot, bool, bool), CandidateError> { - let resp = match beacon_node.get_node_syncing().await { - Ok(resp) => resp, - Err(e) => { - warn!( - log, - "Unable connect to beacon node"; - "error" => %e - ); - - return Err(CandidateError::Offline); - } - }; - - Ok(( - resp.data.head_slot, - resp.data.is_optimistic, - resp.data.el_offline, - )) -} diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 209876f07b..b2d1ebb3c2 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -247,6 +247,18 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) + .arg( + Arg::new("http-token-path") + .long("http-token-path") + .requires("http") + .value_name("HTTP_TOKEN_PATH") + .help( + "Path to file containing the HTTP API token for validator client authentication. \ + If not specified, defaults to {validators-dir}/api-token.txt." 
+ ) + .action(ArgAction::Set) + .display_order(0) + ) /* Prometheus metrics HTTP server related arguments */ .arg( Arg::new("metrics") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index f42ed55146..0fecb5202d 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,8 +1,4 @@ -use crate::beacon_node_fallback::ApiTopic; -use crate::graffiti_file::GraffitiFile; -use crate::{ - beacon_node_fallback, beacon_node_health::BeaconNodeSyncDistanceTiers, http_api, http_metrics, -}; +use beacon_node_fallback::{beacon_node_health::BeaconNodeSyncDistanceTiers, ApiTopic}; use clap::ArgMatches; use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_optional, parse_required}; use directory::{ @@ -10,6 +6,8 @@ use directory::{ DEFAULT_VALIDATOR_DIR, }; use eth2::types::Graffiti; +use graffiti_file::GraffitiFile; +use initialized_validators::Config as InitializedValidatorsConfig; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slog::{info, warn, Logger}; @@ -19,13 +17,18 @@ use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; use types::{Address, GRAFFITI_BYTES_LEN}; +use validator_http_api::{self, PK_FILENAME}; +use validator_http_metrics; +use validator_store::Config as ValidatorStoreConfig; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; -pub const DEFAULT_WEB3SIGNER_KEEP_ALIVE: Option = Some(Duration::from_secs(20)); /// Stores the core configuration for this validator instance. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Config { + /// Configuration parameters for the validator store. + #[serde(flatten)] + pub validator_store: ValidatorStoreConfig, /// The data directory, which stores all validator databases pub validator_dir: PathBuf, /// The directory containing the passwords to unlock validator keystores. @@ -49,12 +52,10 @@ pub struct Config { pub graffiti: Option, /// Graffiti file to load per validator graffitis. 
pub graffiti_file: Option, - /// Fallback fallback address. - pub fee_recipient: Option
, /// Configuration for the HTTP REST API. - pub http_api: http_api::Config, + pub http_api: validator_http_api::Config, /// Configuration for the HTTP REST API. - pub http_metrics: http_metrics::Config, + pub http_metrics: validator_http_metrics::Config, /// Configuration for the Beacon Node fallback. pub beacon_node_fallback: beacon_node_fallback::Config, /// Configuration for sending metrics to a remote explorer endpoint. @@ -68,11 +69,7 @@ pub struct Config { /// (<= 64 validators) pub enable_high_validator_count_metrics: bool, /// Enable use of the blinded block endpoints during proposals. - pub builder_proposals: bool, - /// Overrides the timestamp field in builder api ValidatorRegistrationV1 pub builder_registration_timestamp_override: Option, - /// Fallback gas limit. - pub gas_limit: Option, /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. pub beacon_nodes_tls_certs: Option>, @@ -82,16 +79,11 @@ pub struct Config { pub enable_latency_measurement_service: bool, /// Defines the number of validators per `validator/register_validator` request sent to the BN. pub validator_registration_batch_size: usize, - /// Enable slashing protection even while using web3signer keys. - pub enable_web3signer_slashing_protection: bool, - /// Specifies the boost factor, a percentage multiplier to apply to the builder's payload value. - pub builder_boost_factor: Option, - /// If true, Lighthouse will prefer builder proposals, if available. - pub prefer_builder_proposals: bool, /// Whether we are running with distributed network support. 
pub distributed: bool, - pub web3_signer_keep_alive_timeout: Option, - pub web3_signer_max_idle_connections: Option, + /// Configuration for the initialized validators + #[serde(flatten)] + pub initialized_validators: InitializedValidatorsConfig, } impl Default for Config { @@ -109,6 +101,7 @@ impl Default for Config { let beacon_nodes = vec![SensitiveUrl::parse(DEFAULT_BEACON_NODE) .expect("beacon_nodes must always be a valid url.")]; Self { + validator_store: ValidatorStoreConfig::default(), validator_dir, secrets_dir, beacon_nodes, @@ -119,7 +112,6 @@ impl Default for Config { use_long_timeouts: false, graffiti: None, graffiti_file: None, - fee_recipient: None, http_api: <_>::default(), http_metrics: <_>::default(), beacon_node_fallback: <_>::default(), @@ -127,18 +119,12 @@ impl Default for Config { enable_doppelganger_protection: false, enable_high_validator_count_metrics: false, beacon_nodes_tls_certs: None, - builder_proposals: false, builder_registration_timestamp_override: None, - gas_limit: None, broadcast_topics: vec![ApiTopic::Subscriptions], enable_latency_measurement_service: true, validator_registration_batch_size: 500, - enable_web3signer_slashing_protection: true, - builder_boost_factor: None, - prefer_builder_proposals: false, distributed: false, - web3_signer_keep_alive_timeout: DEFAULT_WEB3SIGNER_KEEP_ALIVE, - web3_signer_max_idle_connections: None, + initialized_validators: <_>::default(), } } } @@ -233,7 +219,7 @@ impl Config { if let Some(input_fee_recipient) = parse_optional::
(cli_args, "suggested-fee-recipient")? { - config.fee_recipient = Some(input_fee_recipient); + config.validator_store.fee_recipient = Some(input_fee_recipient); } if let Some(tls_certs) = parse_optional::(cli_args, "beacon-nodes-tls-certs")? { @@ -270,7 +256,7 @@ impl Config { * Web3 signer */ if let Some(s) = parse_optional::(cli_args, "web3-signer-keep-alive-timeout")? { - config.web3_signer_keep_alive_timeout = if s == "null" { + config.initialized_validators.web3_signer_keep_alive_timeout = if s == "null" { None } else { Some(Duration::from_millis( @@ -279,7 +265,9 @@ impl Config { } } if let Some(n) = parse_optional::(cli_args, "web3-signer-max-idle-connections")? { - config.web3_signer_max_idle_connections = Some(n); + config + .initialized_validators + .web3_signer_max_idle_connections = Some(n); } /* @@ -326,6 +314,12 @@ impl Config { config.http_api.store_passwords_in_secrets_dir = true; } + if cli_args.get_one::("http-token-path").is_some() { + config.http_api.http_token_path = parse_required(cli_args, "http-token-path") + // For backward compatibility, default to the path under the validator dir if not provided. 
+ .unwrap_or_else(|_| config.validator_dir.join(PK_FILENAME)); + } + /* * Prometheus metrics HTTP server */ @@ -382,14 +376,14 @@ impl Config { } if cli_args.get_flag("builder-proposals") { - config.builder_proposals = true; + config.validator_store.builder_proposals = true; } if cli_args.get_flag("prefer-builder-proposals") { - config.prefer_builder_proposals = true; + config.validator_store.prefer_builder_proposals = true; } - config.gas_limit = cli_args + config.validator_store.gas_limit = cli_args .get_one::("gas-limit") .map(|gas_limit| { gas_limit @@ -408,7 +402,8 @@ impl Config { ); } - config.builder_boost_factor = parse_optional(cli_args, "builder-boost-factor")?; + config.validator_store.builder_boost_factor = + parse_optional(cli_args, "builder-boost-factor")?; config.enable_latency_measurement_service = !cli_args.get_flag("disable-latency-measurement-service"); @@ -419,7 +414,7 @@ impl Config { return Err("validator-registration-batch-size cannot be 0".to_string()); } - config.enable_web3signer_slashing_protection = + config.validator_store.enable_web3signer_slashing_protection = if cli_args.get_flag("disable-slashing-protection-web3signer") { warn!( log, diff --git a/validator_client/src/latency.rs b/validator_client/src/latency.rs index 7e752f2923..22f02c7c0b 100644 --- a/validator_client/src/latency.rs +++ b/validator_client/src/latency.rs @@ -1,4 +1,4 @@ -use crate::{http_metrics::metrics, BeaconNodeFallback}; +use beacon_node_fallback::BeaconNodeFallback; use environment::RuntimeContext; use slog::debug; use slot_clock::SlotClock; @@ -44,14 +44,14 @@ pub fn start_latency_service( "node" => &measurement.beacon_node_id, "latency" => latency.as_millis(), ); - metrics::observe_timer_vec( - &metrics::VC_BEACON_NODE_LATENCY, + validator_metrics::observe_timer_vec( + &validator_metrics::VC_BEACON_NODE_LATENCY, &[&measurement.beacon_node_id], latency, ); if i == 0 { - metrics::observe_duration( - &metrics::VC_BEACON_NODE_LATENCY_PRIMARY_ENDPOINT, + 
validator_metrics::observe_duration( + &validator_metrics::VC_BEACON_NODE_LATENCY_PRIMARY_ENDPOINT, latency, ); } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 9a02ffdefb..8ebfe98b15 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -1,52 +1,28 @@ -mod attestation_service; -mod beacon_node_fallback; -mod beacon_node_health; -mod block_service; -mod check_synced; mod cli; -mod duties_service; -mod graffiti_file; -mod http_metrics; -mod key_cache; +pub mod config; mod latency; mod notifier; -mod preparation_service; -mod signing_method; -mod sync_committee_service; -pub mod config; -mod doppelganger_service; -pub mod http_api; -pub mod initialized_validators; -pub mod validator_store; - -pub use beacon_node_fallback::ApiTopic; -pub use beacon_node_health::BeaconNodeSyncDistanceTiers; pub use cli::cli_app; pub use config::Config; use initialized_validators::InitializedValidators; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use monitoring_api::{MonitoringHttpClient, ProcessType}; use sensitive_url::SensitiveUrl; -pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; -use crate::beacon_node_fallback::{ +use beacon_node_fallback::{ start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, }; -use crate::doppelganger_service::DoppelgangerService; -use crate::graffiti_file::GraffitiFile; -use crate::initialized_validators::Error::UnableToOpenVotingKeystore; + use account_utils::validator_definitions::ValidatorDefinitions; -use attestation_service::{AttestationService, AttestationServiceBuilder}; -use block_service::{BlockService, BlockServiceBuilder}; use clap::ArgMatches; -use duties_service::{sync::SyncDutiesMap, DutiesService}; +use doppelganger_service::DoppelgangerService; use environment::RuntimeContext; -use eth2::{reqwest::ClientBuilder, types::Graffiti, BeaconNodeHttpClient, 
StatusCode, Timeouts}; -use http_api::ApiSecret; +use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Timeouts}; +use initialized_validators::Error::UnableToOpenVotingKeystore; use notifier::spawn_notifier; use parking_lot::RwLock; -use preparation_service::{PreparationService, PreparationServiceBuilder}; use reqwest::Certificate; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -58,12 +34,20 @@ use std::net::SocketAddr; use std::path::Path; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; -use sync_committee_service::SyncCommitteeService; use tokio::{ sync::mpsc, time::{sleep, Duration}, }; -use types::{EthSpec, Hash256, PublicKeyBytes}; +use types::{EthSpec, Hash256}; +use validator_http_api::ApiSecret; +use validator_services::{ + attestation_service::{AttestationService, AttestationServiceBuilder}, + block_service::{BlockService, BlockServiceBuilder}, + duties_service::{self, DutiesService}, + preparation_service::{PreparationService, PreparationServiceBuilder}, + sync::SyncDutiesMap, + sync_committee_service::SyncCommitteeService, +}; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. @@ -152,22 +136,23 @@ impl ProductionValidatorClient { ); // Optionally start the metrics server. 
- let http_metrics_ctx = if config.http_metrics.enabled { - let shared = http_metrics::Shared { + let validator_metrics_ctx = if config.http_metrics.enabled { + let shared = validator_http_metrics::Shared { validator_store: None, genesis_time: None, duties_service: None, }; - let ctx: Arc> = Arc::new(http_metrics::Context { - config: config.http_metrics.clone(), - shared: RwLock::new(shared), - log: log.clone(), - }); + let ctx: Arc> = + Arc::new(validator_http_metrics::Context { + config: config.http_metrics.clone(), + shared: RwLock::new(shared), + log: log.clone(), + }); let exit = context.executor.exit(); - let (_listen_addr, server) = http_metrics::serve(ctx.clone(), exit) + let (_listen_addr, server) = validator_http_metrics::serve(ctx.clone(), exit) .map_err(|e| format!("Unable to start metrics API server: {:?}", e))?; context @@ -215,7 +200,7 @@ impl ProductionValidatorClient { let validators = InitializedValidators::from_definitions( validator_defs, config.validator_dir.clone(), - config.clone(), + config.initialized_validators.clone(), log.clone(), ) .await @@ -384,20 +369,20 @@ impl ProductionValidatorClient { // Set the count for beacon node fallbacks excluding the primary beacon node. set_gauge( - &http_metrics::metrics::ETH2_FALLBACK_CONFIGURED, + &validator_metrics::ETH2_FALLBACK_CONFIGURED, num_nodes.saturating_sub(1) as i64, ); // Set the total beacon node count. set_gauge( - &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT, + &validator_metrics::TOTAL_BEACON_NODES_COUNT, num_nodes as i64, ); // Initialize the number of connected, synced beacon nodes to 0. - set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 0); - set_gauge(&http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, 0); + set_gauge(&validator_metrics::ETH2_FALLBACK_CONNECTED, 0); + set_gauge(&validator_metrics::SYNCED_BEACON_NODES_COUNT, 0); // Initialize the number of connected, avaliable beacon nodes to 0. 
- set_gauge(&http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, 0); + set_gauge(&validator_metrics::AVAILABLE_BEACON_NODES_COUNT, 0); let mut beacon_nodes: BeaconNodeFallback<_, E> = BeaconNodeFallback::new( candidates, @@ -422,7 +407,7 @@ impl ProductionValidatorClient { }; // Update the metrics server. - if let Some(ctx) = &http_metrics_ctx { + if let Some(ctx) = &validator_metrics_ctx { ctx.shared.write().genesis_time = Some(genesis_time); } @@ -459,7 +444,7 @@ impl ProductionValidatorClient { context.eth2_config.spec.clone(), doppelganger_service.clone(), slot_clock.clone(), - &config, + &config.validator_store, context.executor.clone(), log.clone(), )); @@ -496,7 +481,7 @@ impl ProductionValidatorClient { }); // Update the metrics server. - if let Some(ctx) = &http_metrics_ctx { + if let Some(ctx) = &validator_metrics_ctx { ctx.shared.write().validator_store = Some(validator_store.clone()); ctx.shared.write().duties_service = Some(duties_service.clone()); } @@ -566,10 +551,10 @@ impl ProductionValidatorClient { let (block_service_tx, block_service_rx) = mpsc::channel(channel_capacity); let log = self.context.log(); - let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?; + let api_secret = ApiSecret::create_or_open(&self.config.http_api.http_token_path)?; self.http_api_listen_addr = if self.config.http_api.enabled { - let ctx = Arc::new(http_api::Context { + let ctx = Arc::new(validator_http_api::Context { task_executor: self.context.executor.clone(), api_secret, block_service: Some(self.block_service.clone()), @@ -588,7 +573,7 @@ impl ProductionValidatorClient { let exit = self.context.executor.exit(); - let (listen_addr, server) = http_api::serve(ctx, exit) + let (listen_addr, server) = validator_http_api::serve(ctx, exit) .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; self.context @@ -850,24 +835,3 @@ pub fn load_pem_certificate>(pem_path: P) -> Result, - validator_definition_graffiti: Option, - graffiti_flag: 
Option, -) -> Option { - graffiti_file - .and_then(|mut g| match g.load_graffiti(validator_pubkey) { - Ok(g) => g, - Err(e) => { - warn!(log, "Failed to read graffiti file"; "error" => ?e); - None - } - }) - .or(validator_definition_graffiti) - .or(graffiti_flag) -} diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index 00d7b14de7..ff66517795 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -1,6 +1,5 @@ -use crate::http_metrics; use crate::{DutiesService, ProductionValidatorClient}; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use slog::{debug, error, info, Logger}; use slot_clock::SlotClock; use tokio::time::{sleep, Duration}; @@ -45,15 +44,15 @@ async fn notify( let num_synced_fallback = num_synced.saturating_sub(1); set_gauge( - &http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, + &validator_metrics::AVAILABLE_BEACON_NODES_COUNT, num_available as i64, ); set_gauge( - &http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, + &validator_metrics::SYNCED_BEACON_NODES_COUNT, num_synced as i64, ); set_gauge( - &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT, + &validator_metrics::TOTAL_BEACON_NODES_COUNT, num_total as i64, ); if num_synced > 0 { @@ -79,9 +78,9 @@ async fn notify( ) } if num_synced_fallback > 0 { - set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 1); + set_gauge(&validator_metrics::ETH2_FALLBACK_CONNECTED, 1); } else { - set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 0); + set_gauge(&validator_metrics::ETH2_FALLBACK_CONNECTED, 0); } for info in candidate_info { diff --git a/validator_client/validator_metrics/Cargo.toml b/validator_client/validator_metrics/Cargo.toml new file mode 100644 index 0000000000..b3cf665b26 --- /dev/null +++ b/validator_client/validator_metrics/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "validator_metrics" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[lib] +name = 
"validator_metrics" +path = "src/lib.rs" + +[dependencies] +metrics = { workspace = true } diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/validator_metrics/src/lib.rs similarity index 82% rename from validator_client/src/http_metrics/metrics.rs rename to validator_client/validator_metrics/src/lib.rs index 8bc569c67a..060d8a4edd 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/validator_metrics/src/lib.rs @@ -1,9 +1,4 @@ -use super::Context; -use malloc_utils::scrape_allocator_metrics; -use slot_clock::SlotClock; use std::sync::LazyLock; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::EthSpec; pub const SUCCESS: &str = "success"; pub const SLASHABLE: &str = "slashable"; @@ -38,7 +33,7 @@ pub const SUBSCRIPTIONS: &str = "subscriptions"; pub const LOCAL_KEYSTORE: &str = "local_keystore"; pub const WEB3SIGNER: &str = "web3signer"; -pub use lighthouse_metrics::*; +pub use metrics::*; pub static GENESIS_DISTANCE: LazyLock> = LazyLock::new(|| { try_create_int_gauge( @@ -267,58 +262,3 @@ pub static VC_BEACON_NODE_LATENCY_PRIMARY_ENDPOINT: LazyLock> "Round-trip latency for the primary BN endpoint", ) }); - -pub fn gather_prometheus_metrics( - ctx: &Context, -) -> std::result::Result { - let mut buffer = vec![]; - let encoder = TextEncoder::new(); - - { - let shared = ctx.shared.read(); - - if let Some(genesis_time) = shared.genesis_time { - if let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) { - let distance = now.as_secs() as i64 - genesis_time as i64; - set_gauge(&GENESIS_DISTANCE, distance); - } - } - - if let Some(duties_service) = &shared.duties_service { - if let Some(slot) = duties_service.slot_clock.now() { - let current_epoch = slot.epoch(E::slots_per_epoch()); - let next_epoch = current_epoch + 1; - - set_int_gauge( - &PROPOSER_COUNT, - &[CURRENT_EPOCH], - duties_service.proposer_count(current_epoch) as i64, - ); - set_int_gauge( - &ATTESTER_COUNT, - &[CURRENT_EPOCH], - 
duties_service.attester_count(current_epoch) as i64, - ); - set_int_gauge( - &ATTESTER_COUNT, - &[NEXT_EPOCH], - duties_service.attester_count(next_epoch) as i64, - ); - } - } - } - - // It's important to ensure these metrics are explicitly enabled in the case that users aren't - // using glibc and this function causes panics. - if ctx.config.allocator_metrics_enabled { - scrape_allocator_metrics(); - } - - warp_utils::metrics::scrape_health_metrics(); - - encoder - .encode(&lighthouse_metrics::gather(), &mut buffer) - .unwrap(); - - String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) -} diff --git a/validator_client/validator_services/Cargo.toml b/validator_client/validator_services/Cargo.toml new file mode 100644 index 0000000000..21f0ae2d77 --- /dev/null +++ b/validator_client/validator_services/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "validator_services" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +beacon_node_fallback = { workspace = true } +bls = { workspace = true } +doppelganger_service = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +futures = { workspace = true } +graffiti_file = { workspace = true } +parking_lot = { workspace = true } +safe_arith = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +tokio = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +validator_metrics = { workspace = true } +validator_store = { workspace = true } diff --git a/validator_client/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs similarity index 95% rename from validator_client/src/attestation_service.rs rename to validator_client/validator_services/src/attestation_service.rs index 5363f36f66..e31ad4f661 100644 --- a/validator_client/src/attestation_service.rs +++ 
b/validator_client/validator_services/src/attestation_service.rs @@ -1,9 +1,5 @@ -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; -use crate::{ - duties_service::{DutiesService, DutyAndProof}, - http_metrics::metrics, - validator_store::{Error as ValidatorStoreError, ValidatorStore}, -}; +use crate::duties_service::{DutiesService, DutyAndProof}; +use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use environment::RuntimeContext; use futures::future::join_all; use slog::{crit, debug, error, info, trace, warn}; @@ -14,8 +10,10 @@ use std::sync::Arc; use tokio::time::{sleep, sleep_until, Duration, Instant}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; +use validator_store::{Error as ValidatorStoreError, ValidatorStore}; /// Builds an `AttestationService`. +#[derive(Default)] pub struct AttestationServiceBuilder { duties_service: Option>>, validator_store: Option>>, @@ -238,9 +236,9 @@ impl AttestationService { aggregate_production_instant: Instant, ) -> Result<(), ()> { let log = self.context.log(); - let attestations_timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS], + let attestations_timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS], ); // There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have @@ -278,9 +276,9 @@ impl AttestationService { sleep_until(aggregate_production_instant).await; // Start the metrics timer *after* we've done the delay. 
- let _aggregates_timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::AGGREGATES], + let _aggregates_timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::AGGREGATES], ); // Then download, sign and publish a `SignedAggregateAndProof` for each @@ -339,9 +337,9 @@ impl AttestationService { let attestation_data = self .beacon_nodes .first_success(|beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS_HTTP_GET], + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS_HTTP_GET], ); beacon_node .get_validator_attestation_data(slot, committee_index) @@ -454,9 +452,9 @@ impl AttestationService { match self .beacon_nodes .request(ApiTopic::Attestations, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS_HTTP_POST], + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS_HTTP_POST], ); if fork_name.electra_enabled() { beacon_node @@ -531,9 +529,9 @@ impl AttestationService { let aggregated_attestation = &self .beacon_nodes .first_success(|beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::AGGREGATES_HTTP_GET], + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::AGGREGATES_HTTP_GET], ); if fork_name.electra_enabled() { beacon_node @@ -620,9 +618,9 @@ impl AttestationService { match self .beacon_nodes .first_success(|beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::AGGREGATES_HTTP_POST], + let _timer = validator_metrics::start_timer_vec( + 
&validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::AGGREGATES_HTTP_POST], ); if fork_name.electra_enabled() { beacon_node diff --git a/validator_client/src/block_service.rs b/validator_client/validator_services/src/block_service.rs similarity index 93% rename from validator_client/src/block_service.rs rename to validator_client/validator_services/src/block_service.rs index 665eaf0a0f..60eb0361ad 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -1,17 +1,9 @@ -use crate::beacon_node_fallback::{Error as FallbackError, Errors}; -use crate::{ - beacon_node_fallback::{ApiTopic, BeaconNodeFallback}, - determine_graffiti, - graffiti_file::GraffitiFile, -}; -use crate::{ - http_metrics::metrics, - validator_store::{Error as ValidatorStoreError, ValidatorStore}, -}; +use beacon_node_fallback::{ApiTopic, BeaconNodeFallback, Error as FallbackError, Errors}; use bls::SignatureBytes; use environment::RuntimeContext; use eth2::types::{FullBlockContents, PublishBlockRequest}; use eth2::{BeaconNodeHttpClient, StatusCode}; +use graffiti_file::{determine_graffiti, GraffitiFile}; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::fmt::Debug; @@ -24,6 +16,7 @@ use types::{ BlindedBeaconBlock, BlockType, EthSpec, Graffiti, PublicKeyBytes, SignedBlindedBeaconBlock, Slot, }; +use validator_store::{Error as ValidatorStoreError, ValidatorStore}; #[derive(Debug)] pub enum BlockError { @@ -50,6 +43,7 @@ impl From> for BlockError { } /// Builds a `BlockService`. 
+#[derive(Default)] pub struct BlockServiceBuilder { validator_store: Option>>, slot_clock: Option>, @@ -186,8 +180,8 @@ impl ProposerFallback { pub struct Inner { validator_store: Arc>, slot_clock: Arc, - pub(crate) beacon_nodes: Arc>, - pub(crate) proposer_nodes: Option>>, + pub beacon_nodes: Arc>, + pub proposer_nodes: Option>>, context: RuntimeContext, graffiti: Option, graffiti_file: Option, @@ -247,8 +241,10 @@ impl BlockService { /// Attempt to produce a block for any block producers in the `ValidatorStore`. async fn do_update(&self, notification: BlockServiceNotification) -> Result<(), ()> { let log = self.context.log(); - let _timer = - metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::FULL_UPDATE]); + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::FULL_UPDATE], + ); let slot = self.slot_clock.now().ok_or_else(move || { crit!(log, "Duties manager failed to read slot clock"); @@ -337,7 +333,7 @@ impl BlockService { unsigned_block: UnsignedBlock, ) -> Result<(), BlockError> { let log = self.context.log(); - let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); + let signing_timer = validator_metrics::start_timer(&validator_metrics::BLOCK_SIGNING_TIMES); let res = match unsigned_block { UnsignedBlock::Full(block_contents) => { @@ -418,8 +414,10 @@ impl BlockService { builder_boost_factor: Option, ) -> Result<(), BlockError> { let log = self.context.log(); - let _timer = - metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK]); + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK], + ); let randao_reveal = match self .validator_store @@ -475,9 +473,9 @@ impl BlockService { // great view of attestations on the network. 
let unsigned_block = proposer_fallback .request_proposers_last(|beacon_node| async move { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], + let _get_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK_HTTP_GET], ); Self::get_validator_block( &beacon_node, @@ -520,22 +518,22 @@ impl BlockService { let slot = signed_block.slot(); match signed_block { SignedBlock::Full(signed_block) => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], + let _post_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blocks(signed_block) + .post_beacon_blocks_v2_ssz(signed_block, None) .await .or_else(|e| handle_block_post_error(e, slot, log))? } SignedBlock::Blinded(signed_block) => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + let _post_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BLINDED_BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blinded_blocks(signed_block) + .post_beacon_blinded_blocks_v2_ssz(signed_block, None) .await .or_else(|e| handle_block_post_error(e, slot, log))? } diff --git a/validator_client/src/duties_service.rs b/validator_client/validator_services/src/duties_service.rs similarity index 95% rename from validator_client/src/duties_service.rs rename to validator_client/validator_services/src/duties_service.rs index cf8d499792..187eb4feb5 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/validator_services/src/duties_service.rs @@ -6,15 +6,11 @@ //! The `DutiesService` is also responsible for sending events to the `BlockService` which trigger //! block production. 
-pub mod sync; - -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; -use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; -use crate::{ - block_service::BlockServiceNotification, - http_metrics::metrics, - validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}, -}; +use crate::block_service::BlockServiceNotification; +use crate::sync::poll_sync_committee_duties; +use crate::sync::SyncDutiesMap; +use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; +use doppelganger_service::DoppelgangerStatus; use environment::RuntimeContext; use eth2::types::{ AttesterData, BeaconCommitteeSubscription, DutiesResponse, ProposerData, StateId, ValidatorId, @@ -29,10 +25,10 @@ use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; -use sync::poll_sync_committee_duties; -use sync::SyncDutiesMap; use tokio::{sync::mpsc::Sender, time::sleep}; use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; +use validator_metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; +use validator_store::{Error as ValidatorStoreError, ValidatorStore}; /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. 
const HISTORICAL_DUTIES_EPOCHS: u64 = 2; @@ -473,8 +469,10 @@ pub fn start_update_service( async fn poll_validator_indices( duties_service: &DutiesService, ) { - let _timer = - metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::UPDATE_INDICES]); + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_INDICES], + ); let log = duties_service.context.log(); @@ -518,9 +516,9 @@ async fn poll_validator_indices( let download_result = duties_service .beacon_nodes .first_success(|beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::VALIDATOR_ID_HTTP_GET], + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::VALIDATOR_ID_HTTP_GET], ); beacon_node .get_beacon_states_validator_id( @@ -604,9 +602,9 @@ async fn poll_validator_indices( async fn poll_beacon_attesters( duties_service: &Arc>, ) -> Result<(), Error> { - let current_epoch_timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::UPDATE_ATTESTERS_CURRENT_EPOCH], + let current_epoch_timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_ATTESTERS_CURRENT_EPOCH], ); let log = duties_service.context.log(); @@ -660,9 +658,9 @@ async fn poll_beacon_attesters( update_per_validator_duty_metrics::(duties_service, current_epoch, current_slot); drop(current_epoch_timer); - let next_epoch_timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::UPDATE_ATTESTERS_NEXT_EPOCH], + let next_epoch_timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_ATTESTERS_NEXT_EPOCH], ); // Download the duties and update the duties for the next epoch. 
@@ -682,8 +680,10 @@ async fn poll_beacon_attesters( update_per_validator_duty_metrics::(duties_service, next_epoch, current_slot); drop(next_epoch_timer); - let subscriptions_timer = - metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::SUBSCRIPTIONS]); + let subscriptions_timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::SUBSCRIPTIONS], + ); // This vector is intentionally oversized by 10% so that it won't reallocate. // Each validator has 2 attestation duties occuring in the current and next epoch, for which @@ -741,9 +741,9 @@ async fn poll_beacon_attesters( let subscription_result = duties_service .beacon_nodes .request(ApiTopic::Subscriptions, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::SUBSCRIPTIONS_HTTP_POST], + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::SUBSCRIPTIONS_HTTP_POST], ); beacon_node .post_validator_beacon_committee_subscriptions(subscriptions_ref) @@ -815,9 +815,9 @@ async fn poll_beacon_attesters_for_epoch( return Ok(()); } - let fetch_timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::UPDATE_ATTESTERS_FETCH], + let fetch_timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_ATTESTERS_FETCH], ); // Request duties for all uninitialized validators. 
If there isn't any, we will just request for @@ -883,9 +883,9 @@ async fn poll_beacon_attesters_for_epoch( drop(fetch_timer); - let _store_timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::UPDATE_ATTESTERS_STORE], + let _store_timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_ATTESTERS_STORE], ); debug!( @@ -1029,9 +1029,9 @@ async fn post_validator_duties_attester( duties_service .beacon_nodes .first_success(|beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::ATTESTER_DUTIES_HTTP_POST], + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::ATTESTER_DUTIES_HTTP_POST], ); beacon_node .post_validator_duties_attester(epoch, validator_indices) @@ -1089,9 +1089,9 @@ async fn fill_in_selection_proofs( continue; } - let timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::ATTESTATION_SELECTION_PROOFS], + let timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::ATTESTATION_SELECTION_PROOFS], ); // Sign selection proofs (serially). 
@@ -1223,8 +1223,10 @@ async fn poll_beacon_proposers( duties_service: &DutiesService, block_service_tx: &mut Sender, ) -> Result<(), Error> { - let _timer = - metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::UPDATE_PROPOSERS]); + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_PROPOSERS], + ); let log = duties_service.context.log(); @@ -1261,9 +1263,9 @@ async fn poll_beacon_proposers( let download_result = duties_service .beacon_nodes .first_success(|beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::PROPOSER_DUTIES_HTTP_GET], + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::PROPOSER_DUTIES_HTTP_GET], ); beacon_node .get_validator_duties_proposer(current_epoch) @@ -1341,7 +1343,7 @@ async fn poll_beacon_proposers( "Detected new block proposer"; "current_slot" => current_slot, ); - metrics::inc_counter(&metrics::PROPOSAL_CHANGED); + validator_metrics::inc_counter(&validator_metrics::PROPOSAL_CHANGED); } } diff --git a/validator_client/validator_services/src/lib.rs b/validator_client/validator_services/src/lib.rs new file mode 100644 index 0000000000..abf8fab3cb --- /dev/null +++ b/validator_client/validator_services/src/lib.rs @@ -0,0 +1,6 @@ +pub mod attestation_service; +pub mod block_service; +pub mod duties_service; +pub mod preparation_service; +pub mod sync; +pub mod sync_committee_service; diff --git a/validator_client/src/preparation_service.rs b/validator_client/validator_services/src/preparation_service.rs similarity index 97% rename from validator_client/src/preparation_service.rs rename to validator_client/validator_services/src/preparation_service.rs index 010c651c25..480f4af2b3 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/validator_services/src/preparation_service.rs @@ -1,6 +1,6 @@ -use 
crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; -use crate::validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; +use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use bls::PublicKeyBytes; +use doppelganger_service::DoppelgangerStatus; use environment::RuntimeContext; use parking_lot::RwLock; use slog::{debug, error, info, warn}; @@ -15,6 +15,7 @@ use types::{ Address, ChainSpec, EthSpec, ProposerPreparationData, SignedValidatorRegistrationData, ValidatorRegistrationData, }; +use validator_store::{Error as ValidatorStoreError, ProposalData, ValidatorStore}; /// Number of epochs before the Bellatrix hard fork to begin posting proposer preparations. const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; @@ -23,6 +24,7 @@ const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1; /// Builds an `PreparationService`. +#[derive(Default)] pub struct PreparationServiceBuilder { validator_store: Option>>, slot_clock: Option, @@ -492,11 +494,3 @@ impl PreparationService { Ok(()) } } - -/// A helper struct, used for passing data from the validator store to services. -pub struct ProposalData { - pub(crate) validator_index: Option, - pub(crate) fee_recipient: Option
, - pub(crate) gas_limit: u64, - pub(crate) builder_proposals: bool, -} diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/validator_services/src/sync.rs similarity index 98% rename from validator_client/src/duties_service/sync.rs rename to validator_client/validator_services/src/sync.rs index 0bd99dc638..af501326f4 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/validator_services/src/sync.rs @@ -1,10 +1,5 @@ -use crate::{ - doppelganger_service::DoppelgangerStatus, - duties_service::{DutiesService, Error}, - http_metrics::metrics, - validator_store::Error as ValidatorStoreError, -}; - +use crate::duties_service::{DutiesService, Error}; +use doppelganger_service::DoppelgangerStatus; use futures::future::join_all; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; use slog::{crit, debug, info, warn}; @@ -13,6 +8,7 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::sync::Arc; use types::{ChainSpec, EthSpec, PublicKeyBytes, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId}; +use validator_store::Error as ValidatorStoreError; /// Number of epochs in advance to compute selection proofs when not in `distributed` mode. 
pub const AGGREGATION_PRE_COMPUTE_EPOCHS: u64 = 2; @@ -442,9 +438,9 @@ pub async fn poll_sync_committee_duties_for_period"] + +[lib] +name = "validator_store" +path = "src/lib.rs" + +[dependencies] +account_utils = { workspace = true } +doppelganger_service = { workspace = true } +initialized_validators = { workspace = true } +parking_lot = { workspace = true } +serde = { workspace = true } +signing_method = { workspace = true } +slashing_protection = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +task_executor = { workspace = true } +types = { workspace = true } +validator_metrics = { workspace = true } diff --git a/validator_client/src/validator_store.rs b/validator_client/validator_store/src/lib.rs similarity index 90% rename from validator_client/src/validator_store.rs rename to validator_client/validator_store/src/lib.rs index af59ad9892..837af5b51d 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/validator_store/src/lib.rs @@ -1,12 +1,9 @@ -use crate::{ - doppelganger_service::DoppelgangerService, - http_metrics::metrics, - initialized_validators::InitializedValidators, - signing_method::{Error as SigningError, SignableMessage, SigningContext, SigningMethod}, - Config, -}; use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; +use doppelganger_service::{DoppelgangerService, DoppelgangerStatus, DoppelgangerValidatorStore}; +use initialized_validators::InitializedValidators; use parking_lot::{Mutex, RwLock}; +use serde::{Deserialize, Serialize}; +use signing_method::{Error as SigningError, SignableMessage, SigningContext, SigningMethod}; use slashing_protection::{ interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase, }; @@ -26,9 +23,6 @@ use types::{ ValidatorRegistrationData, VoluntaryExit, }; -pub use crate::doppelganger_service::DoppelgangerStatus; -use crate::preparation_service::ProposalData; - #[derive(Debug, PartialEq)] pub enum Error { 
DoppelgangerProtected(PublicKeyBytes), @@ -48,6 +42,30 @@ impl From for Error { } } +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Config { + /// Fallback fee recipient address. + pub fee_recipient: Option
, + /// Fallback gas limit. + pub gas_limit: Option, + /// Enable use of the blinded block endpoints during proposals. + pub builder_proposals: bool, + /// Enable slashing protection even while using web3signer keys. + pub enable_web3signer_slashing_protection: bool, + /// If true, Lighthouse will prefer builder proposals, if available. + pub prefer_builder_proposals: bool, + /// Specifies the boost factor, a percentage multiplier to apply to the builder's payload value. + pub builder_boost_factor: Option, +} + +/// A helper struct, used for passing data from the validator store to services. +pub struct ProposalData { + pub validator_index: Option, + pub fee_recipient: Option
, + pub gas_limit: u64, + pub builder_proposals: bool, +} + /// Number of epochs of slashing protection history to keep. /// /// This acts as a maximum safe-guard against clock drift. @@ -77,6 +95,12 @@ pub struct ValidatorStore { _phantom: PhantomData, } +impl DoppelgangerValidatorStore for ValidatorStore { + fn get_validator_index(&self, pubkey: &PublicKeyBytes) -> Option { + self.validator_index(pubkey) + } +} + impl ValidatorStore { // All arguments are different types. Making the fields `pub` is undesired. A builder seems // unnecessary. @@ -590,7 +614,10 @@ impl ValidatorStore { match slashing_status { // We can safely sign this block without slashing. Ok(Safe::Valid) => { - metrics::inc_counter_vec(&metrics::SIGNED_BLOCKS_TOTAL, &[metrics::SUCCESS]); + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_BLOCKS_TOTAL, + &[validator_metrics::SUCCESS], + ); let signature = signing_method .get_signature::( @@ -607,7 +634,10 @@ impl ValidatorStore { self.log, "Skipping signing of previously signed block"; ); - metrics::inc_counter_vec(&metrics::SIGNED_BLOCKS_TOTAL, &[metrics::SAME_DATA]); + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_BLOCKS_TOTAL, + &[validator_metrics::SAME_DATA], + ); Err(Error::SameData) } Err(NotSafe::UnregisteredValidator(pk)) => { @@ -617,7 +647,10 @@ impl ValidatorStore { "msg" => "Carefully consider running with --init-slashing-protection (see --help)", "public_key" => format!("{:?}", pk) ); - metrics::inc_counter_vec(&metrics::SIGNED_BLOCKS_TOTAL, &[metrics::UNREGISTERED]); + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_BLOCKS_TOTAL, + &[validator_metrics::UNREGISTERED], + ); Err(Error::Slashable(NotSafe::UnregisteredValidator(pk))) } Err(e) => { @@ -626,7 +659,10 @@ impl ValidatorStore { "Not signing slashable block"; "error" => format!("{:?}", e) ); - metrics::inc_counter_vec(&metrics::SIGNED_BLOCKS_TOTAL, &[metrics::SLASHABLE]); + validator_metrics::inc_counter_vec( + 
&validator_metrics::SIGNED_BLOCKS_TOTAL, + &[validator_metrics::SLASHABLE], + ); Err(Error::Slashable(e)) } } @@ -681,7 +717,10 @@ impl ValidatorStore { .add_signature(&signature, validator_committee_position) .map_err(Error::UnableToSignAttestation)?; - metrics::inc_counter_vec(&metrics::SIGNED_ATTESTATIONS_TOTAL, &[metrics::SUCCESS]); + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_ATTESTATIONS_TOTAL, + &[validator_metrics::SUCCESS], + ); Ok(()) } @@ -690,9 +729,9 @@ impl ValidatorStore { self.log, "Skipping signing of previously signed attestation" ); - metrics::inc_counter_vec( - &metrics::SIGNED_ATTESTATIONS_TOTAL, - &[metrics::SAME_DATA], + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_ATTESTATIONS_TOTAL, + &[validator_metrics::SAME_DATA], ); Err(Error::SameData) } @@ -703,9 +742,9 @@ impl ValidatorStore { "msg" => "Carefully consider running with --init-slashing-protection (see --help)", "public_key" => format!("{:?}", pk) ); - metrics::inc_counter_vec( - &metrics::SIGNED_ATTESTATIONS_TOTAL, - &[metrics::UNREGISTERED], + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_ATTESTATIONS_TOTAL, + &[validator_metrics::UNREGISTERED], ); Err(Error::Slashable(NotSafe::UnregisteredValidator(pk))) } @@ -716,9 +755,9 @@ impl ValidatorStore { "attestation" => format!("{:?}", attestation.data()), "error" => format!("{:?}", e) ); - metrics::inc_counter_vec( - &metrics::SIGNED_ATTESTATIONS_TOTAL, - &[metrics::SLASHABLE], + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_ATTESTATIONS_TOTAL, + &[validator_metrics::SLASHABLE], ); Err(Error::Slashable(e)) } @@ -743,7 +782,10 @@ impl ValidatorStore { ) .await?; - metrics::inc_counter_vec(&metrics::SIGNED_VOLUNTARY_EXITS_TOTAL, &[metrics::SUCCESS]); + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_VOLUNTARY_EXITS_TOTAL, + &[validator_metrics::SUCCESS], + ); Ok(SignedVoluntaryExit { message: voluntary_exit, @@ -769,9 +811,9 @@ impl 
ValidatorStore { ) .await?; - metrics::inc_counter_vec( - &metrics::SIGNED_VALIDATOR_REGISTRATIONS_TOTAL, - &[metrics::SUCCESS], + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_VALIDATOR_REGISTRATIONS_TOTAL, + &[validator_metrics::SUCCESS], ); Ok(SignedValidatorRegistrationData { @@ -807,7 +849,10 @@ impl ValidatorStore { ) .await?; - metrics::inc_counter_vec(&metrics::SIGNED_AGGREGATES_TOTAL, &[metrics::SUCCESS]); + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_AGGREGATES_TOTAL, + &[validator_metrics::SUCCESS], + ); Ok(SignedAggregateAndProof::from_aggregate_and_proof( message, signature, @@ -843,7 +888,10 @@ impl ValidatorStore { .await .map_err(Error::UnableToSign)?; - metrics::inc_counter_vec(&metrics::SIGNED_SELECTION_PROOFS_TOTAL, &[metrics::SUCCESS]); + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_SELECTION_PROOFS_TOTAL, + &[validator_metrics::SUCCESS], + ); Ok(signature.into()) } @@ -862,9 +910,9 @@ impl ValidatorStore { // Bypass `with_validator_signing_method`: sync committee messages are not slashable. 
let signing_method = self.doppelganger_bypassed_signing_method(*validator_pubkey)?; - metrics::inc_counter_vec( - &metrics::SIGNED_SYNC_SELECTION_PROOFS_TOTAL, - &[metrics::SUCCESS], + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_SYNC_SELECTION_PROOFS_TOTAL, + &[validator_metrics::SUCCESS], ); let message = SyncAggregatorSelectionData { @@ -911,9 +959,9 @@ impl ValidatorStore { .await .map_err(Error::UnableToSign)?; - metrics::inc_counter_vec( - &metrics::SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL, - &[metrics::SUCCESS], + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL, + &[validator_metrics::SUCCESS], ); Ok(SyncCommitteeMessage { @@ -953,9 +1001,9 @@ impl ValidatorStore { .await .map_err(Error::UnableToSign)?; - metrics::inc_counter_vec( - &metrics::SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL, - &[metrics::SUCCESS], + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL, + &[validator_metrics::SUCCESS], ); Ok(SignedContributionAndProof { message, signature }) @@ -1029,7 +1077,8 @@ impl ValidatorStore { info!(self.log, "Pruning slashing protection DB"; "epoch" => current_epoch); } - let _timer = metrics::start_timer(&metrics::SLASHING_PROTECTION_PRUNE_TIMES); + let _timer = + validator_metrics::start_timer(&validator_metrics::SLASHING_PROTECTION_PRUNE_TIMES); let new_min_target_epoch = current_epoch.saturating_sub(SLASHING_PROTECTION_HISTORY_EPOCHS); let new_min_slot = new_min_target_epoch.start_slot(E::slots_per_epoch()); diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index ebcde6a828..7cb05616f4 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -2,26 +2,27 @@ name = "validator_manager" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = { workspace = true } -types = { workspace = true } 
-environment = { workspace = true } -eth2_network_config = { workspace = true } -clap_utils = { workspace = true } -eth2_wallet = { workspace = true } account_utils = { workspace = true } +clap = { workspace = true } +clap_utils = { workspace = true } +derivative = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +eth2_network_config = { workspace = true } +eth2_wallet = { workspace = true } +ethereum_serde_utils = { workspace = true } +hex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -ethereum_serde_utils = { workspace = true } -tree_hash = { workspace = true } -eth2 = { workspace = true } -hex = { workspace = true } tokio = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } regex = { workspace = true } -validator_client = { workspace = true } +tempfile = { workspace = true } +validator_http_api = { workspace = true } diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs index 4a35791b32..cc4157990f 100644 --- a/validator_manager/src/common.rs +++ b/validator_manager/src/common.rs @@ -1,5 +1,5 @@ +use account_utils::strip_off_newlines; pub use account_utils::STDIN_INPUTS_FLAG; -use account_utils::{strip_off_newlines, ZeroizeString}; use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr}; use eth2::{ lighthouse_vc::{ @@ -14,6 +14,7 @@ use std::fs; use std::path::{Path, PathBuf}; use tree_hash::TreeHash; use types::*; +use zeroize::Zeroizing; pub const IGNORE_DUPLICATES_FLAG: &str = "ignore-duplicates"; pub const COUNT_FLAG: &str = "count"; @@ -41,7 +42,7 @@ pub enum UploadError { #[derive(Clone, Serialize, Deserialize)] pub struct ValidatorSpecification { pub voting_keystore: KeystoreJsonStr, - pub voting_keystore_password: ZeroizeString, + pub voting_keystore_password: Zeroizing, pub slashing_protection: Option, pub 
fee_recipient: Option
, pub gas_limit: Option, diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index 37a6040a9b..d4403b4613 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -45,15 +45,6 @@ pub fn cli_app() -> Command { Another, optional JSON file is created which contains a list of validator \ deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(OUTPUT_PATH_FLAG) .long(OUTPUT_PATH_FLAG) diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs new file mode 100644 index 0000000000..a2d6c062fa --- /dev/null +++ b/validator_manager/src/delete_validators.rs @@ -0,0 +1,293 @@ +use clap::{Arg, ArgAction, ArgMatches, Command}; +use eth2::{ + lighthouse_vc::types::{DeleteKeystoreStatus, DeleteKeystoresRequest}, + SensitiveUrl, +}; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use types::PublicKeyBytes; + +use crate::{common::vc_http_client, DumpConfig}; + +pub const CMD: &str = "delete"; +pub const VC_URL_FLAG: &str = "vc-url"; +pub const VC_TOKEN_FLAG: &str = "vc-token"; +pub const VALIDATOR_FLAG: &str = "validators"; + +#[derive(Debug)] +pub enum DeleteError { + InvalidPublicKey, + DeleteFailed(eth2::Error), +} + +pub fn cli_app() -> Command { + Command::new(CMD) + .about("Deletes one or more validators from a validator client using the HTTP API.") + .arg( + Arg::new(VC_URL_FLAG) + .long(VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help("A HTTP(S) address of a validator client using the keymanager-API.") + .default_value("http://localhost:5062") + .requires(VC_TOKEN_FLAG) + .action(ArgAction::Set) + .display_order(0), + ) + .arg( + Arg::new(VC_TOKEN_FLAG) + .long(VC_TOKEN_FLAG) + .value_name("PATH") + 
.help("The file containing a token required by the validator client.") + .action(ArgAction::Set) + .display_order(0), + ) + .arg( + Arg::new(VALIDATOR_FLAG) + .long(VALIDATOR_FLAG) + .value_name("STRING") + .help("Comma-separated list of validators (pubkey) that will be deleted.") + .action(ArgAction::Set) + .required(true) + .display_order(0), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct DeleteConfig { + pub vc_url: SensitiveUrl, + pub vc_token_path: PathBuf, + pub validators_to_delete: Vec, +} + +impl DeleteConfig { + fn from_cli(matches: &ArgMatches) -> Result { + let validators_to_delete_str = + clap_utils::parse_required::(matches, VALIDATOR_FLAG)?; + + let validators_to_delete = validators_to_delete_str + .split(',') + .map(|s| s.trim().parse()) + .collect::, _>>()?; + + Ok(Self { + vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, + validators_to_delete, + vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + }) + } +} + +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { + let config = DeleteConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? { + Ok(()) + } else { + run(config).await + } +} + +async fn run<'a>(config: DeleteConfig) -> Result<(), String> { + let DeleteConfig { + vc_url, + vc_token_path, + validators_to_delete, + } = config; + + let (http_client, validators) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + + for validator_to_delete in &validators_to_delete { + if !validators + .iter() + .any(|validator| &validator.validating_pubkey == validator_to_delete) + { + return Err(format!("Validator {} doesn't exist", validator_to_delete)); + } + } + + let delete_request = DeleteKeystoresRequest { + pubkeys: validators_to_delete.clone(), + }; + + let responses = http_client + .delete_keystores(&delete_request) + .await + .map_err(|e| format!("Error deleting keystore {}", e))? 
+ .data; + + let mut error = false; + for (validator_to_delete, response) in validators_to_delete.iter().zip(responses.iter()) { + if response.status == DeleteKeystoreStatus::Error + || response.status == DeleteKeystoreStatus::NotFound + || response.status == DeleteKeystoreStatus::NotActive + { + error = true; + eprintln!( + "Problem with removing validator {:?}, status: {:?}", + validator_to_delete, response.status + ); + } + } + if error { + return Err("Problem with removing one or more validators".to_string()); + } + + eprintln!("Validator(s) deleted"); + Ok(()) +} + +#[cfg(not(debug_assertions))] +#[cfg(test)] +mod test { + use std::{ + fs::{self, File}, + io::Write, + str::FromStr, + }; + + use super::*; + use crate::{ + common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, + }; + use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; + + struct TestBuilder { + delete_config: Option, + src_import_builder: Option, + http_config: HttpConfig, + vc_token: Option, + validators: Vec, + } + + impl TestBuilder { + async fn new() -> Self { + Self { + delete_config: None, + src_import_builder: None, + http_config: ApiTester::default_http_config(), + vc_token: None, + validators: vec![], + } + } + + async fn with_validators( + mut self, + count: u32, + first_index: u32, + indices_of_validators_to_delete: Vec, + ) -> Self { + let builder = ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + + self.vc_token = + Some(fs::read_to_string(builder.get_import_config().vc_token_path).unwrap()); + + let local_validators: Vec = { + let contents = + fs::read_to_string(builder.get_import_config().validators_file_path.unwrap()) + .unwrap(); + serde_json::from_str(&contents).unwrap() + }; + + let import_config = builder.get_import_config(); + + let validators_to_delete = indices_of_validators_to_delete + .iter() + .map(|&index| { + PublicKeyBytes::from_str( 
+ format!("0x{}", local_validators[index].voting_keystore.pubkey()).as_str(), + ) + .unwrap() + }) + .collect(); + + self.delete_config = Some(DeleteConfig { + vc_url: import_config.vc_url, + vc_token_path: import_config.vc_token_path, + validators_to_delete, + }); + + self.validators = local_validators.clone(); + self.src_import_builder = Some(builder); + self + } + + pub async fn run_test(self) -> TestResult { + let import_builder = self.src_import_builder.unwrap(); + let import_test_result = import_builder.run_test().await; + assert!(import_test_result.result.is_ok()); + + let path = self.delete_config.clone().unwrap().vc_token_path; + let url = self.delete_config.clone().unwrap().vc_url; + let parent = path.parent().unwrap(); + + fs::create_dir_all(parent).expect("Was not able to create parent directory"); + + File::options() + .write(true) + .read(true) + .create(true) + .truncate(true) + .open(path.clone()) + .unwrap() + .write_all(self.vc_token.clone().unwrap().as_bytes()) + .unwrap(); + + let result = run(self.delete_config.clone().unwrap()).await; + + if result.is_ok() { + let (_, list_keystores_response) = vc_http_client(url, path.clone()).await.unwrap(); + + // The remaining number of active keystores (left) = Total validators - Deleted validators (right) + assert_eq!( + list_keystores_response.len(), + self.validators.len() + - self + .delete_config + .clone() + .unwrap() + .validators_to_delete + .len() + ); + + // Check the remaining validator keys are not in validators_to_delete + assert!(list_keystores_response.iter().all(|keystore| { + !self + .delete_config + .clone() + .unwrap() + .validators_to_delete + .contains(&keystore.validating_pubkey) + })); + + return TestResult { result: Ok(()) }; + } + + TestResult { + result: Err(result.unwrap_err()), + } + } + } + + #[must_use] + struct TestResult { + result: Result<(), String>, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + } + #[tokio::test] + async fn 
delete_multiple_validators() { + TestBuilder::new() + .await + .with_validators(3, 0, vec![0, 1, 2]) + .await + .run_test() + .await + .assert_ok(); + } +} diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index f193e8d0fb..3cebc10bb3 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -1,16 +1,29 @@ use super::common::*; use crate::DumpConfig; +use account_utils::eth2_keystore::Keystore; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; +use derivative::Derivative; +use eth2::lighthouse_vc::types::KeystoreJsonStr; use eth2::{lighthouse_vc::std_types::ImportKeystoreStatus, SensitiveUrl}; use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; +use types::Address; +use zeroize::Zeroizing; pub const CMD: &str = "import"; pub const VALIDATORS_FILE_FLAG: &str = "validators-file"; +pub const KEYSTORE_FILE_FLAG: &str = "keystore-file"; pub const VC_URL_FLAG: &str = "vc-url"; pub const VC_TOKEN_FLAG: &str = "vc-token"; +pub const PASSWORD: &str = "password"; +pub const FEE_RECIPIENT: &str = "suggested-fee-recipient"; +pub const GAS_LIMIT: &str = "gas-limit"; +pub const BUILDER_PROPOSALS: &str = "builder-proposals"; +pub const BUILDER_BOOST_FACTOR: &str = "builder-boost-factor"; +pub const PREFER_BUILDER_PROPOSALS: &str = "prefer-builder-proposals"; +pub const ENABLED: &str = "enabled"; pub const DETECTED_DUPLICATE_MESSAGE: &str = "Duplicate validator detected!"; @@ -21,15 +34,6 @@ pub fn cli_app() -> Command { are defined in a JSON file which can be generated using the \"create-validators\" \ command.", ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(VALIDATORS_FILE_FLAG) .long(VALIDATORS_FILE_FLAG) @@ -39,19 +43,32 @@ pub fn cli_app() -> Command { imported to the validator 
client. This file is usually named \ \"validators.json\".", ) - .required(true) .action(ArgAction::Set) - .display_order(0), + .display_order(0) + .required_unless_present("keystore-file") + .conflicts_with("keystore-file"), + ) + .arg( + Arg::new(KEYSTORE_FILE_FLAG) + .long(KEYSTORE_FILE_FLAG) + .value_name("PATH_TO_KEYSTORE_FILE") + .help( + "The path to a keystore JSON file to be \ + imported to the validator client. This file is usually created \ + using staking-deposit-cli or ethstaker-deposit-cli", + ) + .action(ArgAction::Set) + .display_order(0) + .conflicts_with("validators-file") + .required_unless_present("validators-file") + .requires(PASSWORD), ) .arg( Arg::new(VC_URL_FLAG) .long(VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( - "A HTTP(S) address of a validator client using the keymanager-API. \ - If this value is not supplied then a 'dry run' will be conducted where \ - no changes are made to the validator client.", - ) + "A HTTP(S) address of a validator client using the keymanager-API.") .default_value("http://localhost:5062") .requires(VC_TOKEN_FLAG) .action(ArgAction::Set) @@ -80,29 +97,111 @@ pub fn cli_app() -> Command { ) .display_order(0), ) + .arg( + Arg::new(PASSWORD) + .long(PASSWORD) + .value_name("STRING") + .help("Password of the keystore file.") + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(FEE_RECIPIENT) + .long(FEE_RECIPIENT) + .value_name("ETH1_ADDRESS") + .help("When provided, the imported validator will use the suggested fee recipient. Omit this flag to use the default value from the VC.") + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(GAS_LIMIT) + .long(GAS_LIMIT) + .value_name("UINT64") + .help("When provided, the imported validator will use this gas limit. 
It is recommended \ + to leave this as the default value by not specifying this flag.",) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(BUILDER_PROPOSALS) + .long(BUILDER_PROPOSALS) + .help("When provided, the imported validator will attempt to create \ + blocks via builder rather than the local EL.",) + .value_parser(["true","false"]) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(BUILDER_BOOST_FACTOR) + .long(BUILDER_BOOST_FACTOR) + .value_name("UINT64") + .help("When provided, the imported validator will use this \ + percentage multiplier to apply to the builder's payload value \ + when choosing between a builder payload header and payload from \ + the local execution node.",) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(PREFER_BUILDER_PROPOSALS) + .long(PREFER_BUILDER_PROPOSALS) + .help("When provided, the imported validator will always prefer blocks \ + constructed by builders, regardless of payload value.",) + .value_parser(["true","false"]) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) } -#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Serialize, Deserialize, Derivative)] +#[derivative(Debug)] pub struct ImportConfig { - pub validators_file_path: PathBuf, + pub validators_file_path: Option, + pub keystore_file_path: Option, pub vc_url: SensitiveUrl, pub vc_token_path: PathBuf, pub ignore_duplicates: bool, + #[derivative(Debug = "ignore")] + pub password: Option>, + pub fee_recipient: Option
, + pub gas_limit: Option, + pub builder_proposals: Option, + pub builder_boost_factor: Option, + pub prefer_builder_proposals: Option, + pub enabled: Option, } impl ImportConfig { fn from_cli(matches: &ArgMatches) -> Result { Ok(Self { - validators_file_path: clap_utils::parse_required(matches, VALIDATORS_FILE_FLAG)?, + validators_file_path: clap_utils::parse_optional(matches, VALIDATORS_FILE_FLAG)?, + keystore_file_path: clap_utils::parse_optional(matches, KEYSTORE_FILE_FLAG)?, vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, ignore_duplicates: matches.get_flag(IGNORE_DUPLICATES_FLAG), + password: clap_utils::parse_optional(matches, PASSWORD)?.map(Zeroizing::new), + fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT)?, + gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT)?, + builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS)?, + builder_boost_factor: clap_utils::parse_optional(matches, BUILDER_BOOST_FACTOR)?, + prefer_builder_proposals: clap_utils::parse_optional( + matches, + PREFER_BUILDER_PROPOSALS, + )?, + enabled: clap_utils::parse_optional(matches, ENABLED)?, }) } } pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { let config = ImportConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? 
{ Ok(()) } else { @@ -113,27 +212,61 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() async fn run<'a>(config: ImportConfig) -> Result<(), String> { let ImportConfig { validators_file_path, + keystore_file_path, vc_url, vc_token_path, ignore_duplicates, + password, + fee_recipient, + gas_limit, + builder_proposals, + builder_boost_factor, + prefer_builder_proposals, + enabled, } = config; - if !validators_file_path.exists() { - return Err(format!("Unable to find file at {:?}", validators_file_path)); - } + let validators: Vec = + if let Some(validators_format_path) = &validators_file_path { + if !validators_format_path.exists() { + return Err(format!( + "Unable to find file at {:?}", + validators_format_path + )); + } - let validators_file = fs::OpenOptions::new() - .read(true) - .create(false) - .open(&validators_file_path) - .map_err(|e| format!("Unable to open {:?}: {:?}", validators_file_path, e))?; - let validators: Vec = serde_json::from_reader(&validators_file) - .map_err(|e| { - format!( - "Unable to parse JSON in {:?}: {:?}", - validators_file_path, e - ) - })?; + let validators_file = fs::OpenOptions::new() + .read(true) + .create(false) + .open(validators_format_path) + .map_err(|e| format!("Unable to open {:?}: {:?}", validators_format_path, e))?; + + serde_json::from_reader(&validators_file).map_err(|e| { + format!( + "Unable to parse JSON in {:?}: {:?}", + validators_format_path, e + ) + })? 
+ } else if let Some(keystore_format_path) = &keystore_file_path { + vec![ValidatorSpecification { + voting_keystore: KeystoreJsonStr( + Keystore::from_json_file(keystore_format_path).map_err(|e| format!("{e:?}"))?, + ), + voting_keystore_password: password.ok_or_else(|| { + "The --password flag is required to supply the keystore password".to_string() + })?, + slashing_protection: None, + fee_recipient, + gas_limit, + builder_proposals, + builder_boost_factor, + prefer_builder_proposals, + enabled, + }] + } else { + return Err(format!( + "One of the flag --{VALIDATORS_FILE_FLAG} or --{KEYSTORE_FILE_FLAG} is required." + )); + }; let count = validators.len(); @@ -250,9 +383,9 @@ async fn run<'a>(config: ImportConfig) -> Result<(), String> { pub mod tests { use super::*; use crate::create_validators::tests::TestBuilder as CreateTestBuilder; - use std::fs; + use std::fs::{self, File}; use tempfile::{tempdir, TempDir}; - use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; const VC_TOKEN_FILE_NAME: &str = "vc_token.json"; @@ -279,10 +412,18 @@ pub mod tests { Self { import_config: ImportConfig { // This field will be overwritten later on. 
- validators_file_path: dir.path().into(), + validators_file_path: Some(dir.path().into()), + keystore_file_path: Some(dir.path().into()), vc_url: vc.url.clone(), vc_token_path, ignore_duplicates: false, + password: Some(Zeroizing::new("password".into())), + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, + enabled: None, + prefer_builder_proposals: None, }, vc, create_dir: None, @@ -295,6 +436,10 @@ pub mod tests { self } + pub fn get_import_config(&self) -> ImportConfig { + self.import_config.clone() + } + pub async fn create_validators(mut self, count: u32, first_index: u32) -> Self { let create_result = CreateTestBuilder::default() .mutate_config(|config| { @@ -307,7 +452,55 @@ pub mod tests { create_result.result.is_ok(), "precondition: validators are created" ); - self.import_config.validators_file_path = create_result.validators_file_path(); + self.import_config.validators_file_path = Some(create_result.validators_file_path()); + self.create_dir = Some(create_result.output_dir); + self + } + + // Keystore JSON requires a different format when creating valdiators + pub async fn create_validators_keystore_format( + mut self, + count: u32, + first_index: u32, + ) -> Self { + let create_result = CreateTestBuilder::default() + .mutate_config(|config| { + config.count = count; + config.first_index = first_index; + }) + .run_test() + .await; + assert!( + create_result.result.is_ok(), + "precondition: validators are created" + ); + + let validators_file_path = create_result.validators_file_path(); + + let validators_file = fs::OpenOptions::new() + .read(true) + .create(false) + .open(&validators_file_path) + .map_err(|e| format!("Unable to open {:?}: {:?}", validators_file_path, e)) + .unwrap(); + + let validators: Vec = serde_json::from_reader(&validators_file) + .map_err(|e| { + format!( + "Unable to parse JSON in {:?}: {:?}", + validators_file_path, e + ) + }) + .unwrap(); + + let validator = &validators[0]; + let 
validator_json = validator.voting_keystore.0.clone(); + + let keystore_file = File::create(&validators_file_path).unwrap(); + let _ = validator_json.to_json_writer(keystore_file); + + self.import_config.keystore_file_path = Some(create_result.validators_file_path()); + self.import_config.password = Some(validator.voting_keystore_password.clone()); self.create_dir = Some(create_result.output_dir); self } @@ -327,7 +520,8 @@ pub mod tests { let local_validators: Vec = { let contents = - fs::read_to_string(&self.import_config.validators_file_path).unwrap(); + fs::read_to_string(self.import_config.validators_file_path.unwrap()) + .unwrap(); serde_json::from_str(&contents).unwrap() }; let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; @@ -355,6 +549,39 @@ pub mod tests { vc: self.vc, } } + + pub async fn run_test_keystore_format(self) -> TestResult { + let result = run(self.import_config.clone()).await; + + if result.is_ok() { + self.vc.ensure_key_cache_consistency().await; + + let local_keystore: Keystore = + Keystore::from_json_file(self.import_config.keystore_file_path.unwrap()) + .unwrap(); + + let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; + + assert_eq!( + 1, + list_keystores_response.len(), + "vc should have exactly the number of validators imported" + ); + + let local_pubkey = local_keystore.public_key().unwrap().into(); + let remote_validator = list_keystores_response + .iter() + .find(|validator| validator.validating_pubkey == local_pubkey) + .expect("validator must exist on VC"); + assert_eq!(&remote_validator.derivation_path, &local_keystore.path()); + assert_eq!(remote_validator.readonly, Some(false)); + } + + TestResult { + result, + vc: self.vc, + } + } } #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. 
@@ -445,4 +672,66 @@ pub mod tests { .await .assert_ok(); } + + #[tokio::test] + async fn create_one_validator_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + // Set validators_file_path to None so that keystore_file_path is used for tests with the keystore format + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 0) + .await + .run_test_keystore_format() + .await + .assert_ok(); + } + + #[tokio::test] + async fn create_one_validator_with_offset_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 42) + .await + .run_test_keystore_format() + .await + .assert_ok(); + } + + #[tokio::test] + async fn import_duplicates_when_disallowed_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 0) + .await + .import_validators_without_checks() + .await + .run_test_keystore_format() + .await + .assert_err_contains("DuplicateValidator"); + } + + #[tokio::test] + async fn import_duplicates_when_allowed_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.ignore_duplicates = true; + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 0) + .await + .import_validators_without_checks() + .await + .run_test_keystore_format() + .await + .assert_ok(); + } } diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs index 222dd7076d..8e43cd5977 100644 --- a/validator_manager/src/lib.rs +++ b/validator_manager/src/lib.rs @@ -8,7 +8,9 @@ use types::EthSpec; pub mod common; pub mod create_validators; +pub mod delete_validators; pub mod import_validators; +pub mod list_validators; pub mod move_validators; pub const CMD: &str = "validator_manager"; @@ -51,11 +53,14 @@ pub fn cli_app() -> Command { .help("Prints 
help information") .action(ArgAction::HelpLong) .display_order(0) - .help_heading(FLAG_HEADER), + .help_heading(FLAG_HEADER) + .global(true), ) .subcommand(create_validators::cli_app()) .subcommand(import_validators::cli_app()) .subcommand(move_validators::cli_app()) + .subcommand(list_validators::cli_app()) + .subcommand(delete_validators::cli_app()) } /// Run the account manager, returning an error if the operation did not succeed. @@ -83,6 +88,13 @@ pub fn run(matches: &ArgMatches, env: Environment) -> Result<(), Some((move_validators::CMD, matches)) => { move_validators::cli_run(matches, dump_config).await } + Some((list_validators::CMD, matches)) => { + list_validators::cli_run(matches, dump_config).await + } + Some((delete_validators::CMD, matches)) => { + delete_validators::cli_run(matches, dump_config).await + } + Some(("", _)) => Err("No command supplied. See --help.".to_string()), Some((unknown, _)) => Err(format!( "{} is not a valid {} command. See --help.", unknown, CMD diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs new file mode 100644 index 0000000000..e3deb0b21a --- /dev/null +++ b/validator_manager/src/list_validators.rs @@ -0,0 +1,201 @@ +use clap::{Arg, ArgAction, ArgMatches, Command}; +use eth2::lighthouse_vc::types::SingleKeystoreResponse; +use eth2::SensitiveUrl; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; + +use crate::{common::vc_http_client, DumpConfig}; + +pub const CMD: &str = "list"; +pub const VC_URL_FLAG: &str = "vc-url"; +pub const VC_TOKEN_FLAG: &str = "vc-token"; + +pub fn cli_app() -> Command { + Command::new(CMD) + .about("Lists all validators in a validator client using the HTTP API.") + .arg( + Arg::new(VC_URL_FLAG) + .long(VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help("A HTTP(S) address of a validator client using the keymanager-API.") + .default_value("http://localhost:5062") + .requires(VC_TOKEN_FLAG) + .action(ArgAction::Set) + .display_order(0), + ) + 
.arg( + Arg::new(VC_TOKEN_FLAG) + .long(VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the validator client.") + .action(ArgAction::Set) + .display_order(0), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct ListConfig { + pub vc_url: SensitiveUrl, + pub vc_token_path: PathBuf, +} + +impl ListConfig { + fn from_cli(matches: &ArgMatches) -> Result { + Ok(Self { + vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, + vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + }) + } +} + +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { + let config = ListConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? { + Ok(()) + } else { + run(config).await?; + Ok(()) + } +} + +async fn run<'a>(config: ListConfig) -> Result, String> { + let ListConfig { + vc_url, + vc_token_path, + } = config; + + let (_, validators) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + + println!("List of validators ({}):", validators.len()); + + for validator in &validators { + println!("{}", validator.validating_pubkey); + } + + Ok(validators) +} + +#[cfg(not(debug_assertions))] +#[cfg(test)] +mod test { + use std::{ + fs::{self, File}, + io::Write, + }; + + use super::*; + use crate::{ + common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, + }; + use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; + + struct TestBuilder { + list_config: Option, + src_import_builder: Option, + http_config: HttpConfig, + vc_token: Option, + validators: Vec, + } + + impl TestBuilder { + async fn new() -> Self { + Self { + list_config: None, + src_import_builder: None, + http_config: ApiTester::default_http_config(), + vc_token: None, + validators: vec![], + } + } + + async fn with_validators(mut self, count: u32, first_index: u32) -> Self { + let builder = 
ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + self.list_config = Some(ListConfig { + vc_url: builder.get_import_config().vc_url, + vc_token_path: builder.get_import_config().vc_token_path, + }); + + self.vc_token = + Some(fs::read_to_string(builder.get_import_config().vc_token_path).unwrap()); + + let local_validators: Vec = { + let contents = + fs::read_to_string(builder.get_import_config().validators_file_path.unwrap()) + .unwrap(); + serde_json::from_str(&contents).unwrap() + }; + + self.validators = local_validators.clone(); + self.src_import_builder = Some(builder); + self + } + + pub async fn run_test(self) -> TestResult { + let import_test_result = self.src_import_builder.unwrap().run_test().await; + assert!(import_test_result.result.is_ok()); + + let path = self.list_config.clone().unwrap().vc_token_path; + let parent = path.parent().unwrap(); + + fs::create_dir_all(parent).expect("Was not able to create parent directory"); + + File::options() + .write(true) + .read(true) + .create(true) + .truncate(true) + .open(path) + .unwrap() + .write_all(self.vc_token.clone().unwrap().as_bytes()) + .unwrap(); + + let result = run(self.list_config.clone().unwrap()).await; + + if result.is_ok() { + let result_ref = result.as_ref().unwrap(); + + for local_validator in &self.validators { + let local_keystore = &local_validator.voting_keystore.0; + let local_pubkey = local_keystore.public_key().unwrap(); + assert!( + result_ref + .iter() + .any(|validator| validator.validating_pubkey + == local_pubkey.clone().into()), + "local validator pubkey not found in result" + ); + } + + return TestResult { result: Ok(()) }; + } + + TestResult { + result: Err(result.unwrap_err()), + } + } + } + + #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. 
+ struct TestResult { + result: Result<(), String>, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + } + #[tokio::test] + async fn list_all_validators() { + TestBuilder::new() + .await + .with_validators(3, 0) + .await + .run_test() + .await + .assert_ok(); + } +} diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 91bc2b0ef8..4d0820f5a8 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -1,8 +1,7 @@ use super::common::*; use crate::DumpConfig; -use account_utils::{read_password_from_user, ZeroizeString}; +use account_utils::read_password_from_user; use clap::{Arg, ArgAction, ArgMatches, Command}; -use clap_utils::FLAG_HEADER; use eth2::{ lighthouse_vc::{ std_types::{ @@ -20,6 +19,7 @@ use std::str::FromStr; use std::time::Duration; use tokio::time::sleep; use types::{Address, PublicKeyBytes}; +use zeroize::Zeroizing; pub const MOVE_DIR_NAME: &str = "lighthouse-validator-move"; pub const VALIDATOR_SPECIFICATION_FILE: &str = "validator-specification.json"; @@ -49,7 +49,7 @@ pub enum PasswordSource { } impl PasswordSource { - fn read_password(&mut self, pubkey: &PublicKeyBytes) -> Result { + fn read_password(&mut self, pubkey: &PublicKeyBytes) -> Result, String> { match self { PasswordSource::Interactive { stdin_inputs } => { eprintln!("Please enter a password for keystore {:?}:", pubkey); @@ -75,15 +75,6 @@ pub fn cli_app() -> Command { command. 
This command only supports validators signing via a keystore on the local \ file system (i.e., not Web3Signer validators).", ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(SRC_VC_URL_FLAG) .long(SRC_VC_URL_FLAG) @@ -678,7 +669,7 @@ mod test { use account_utils::validator_definitions::SigningDefinition; use std::fs; use tempfile::{tempdir, TempDir}; - use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; const SRC_VC_TOKEN_FILE_NAME: &str = "src_vc_token.json"; const DEST_VC_TOKEN_FILE_NAME: &str = "dest_vc_token.json"; @@ -987,13 +978,13 @@ mod test { }) .unwrap(); // Set all definitions to use the same password path as the primary. - definitions.iter_mut().enumerate().for_each(|(_, def)| { - match &mut def.signing_definition { - SigningDefinition::LocalKeystore { - voting_keystore_password_path: Some(path), - .. - } => *path = primary_path.clone(), - _ => (), + definitions.iter_mut().for_each(|def| { + if let SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), + .. 
+ } = &mut def.signing_definition + { + *path = primary_path.clone() } }) } diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 9e8da3b293..41cfb58e28 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -10,37 +10,36 @@ path = "src/lib.rs" [[bin]] name = "watch" path = "src/main.rs" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +axum = "0.7" +beacon_node = { workspace = true } +bls = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -log = { workspace = true } -env_logger = { workspace = true } -types = { workspace = true } -eth2 = { workspace = true } -beacon_node = { workspace = true } -tokio = { workspace = true } -axum = "0.7" -hyper = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -reqwest = { workspace = true } -url = { workspace = true } -rand = { workspace = true } diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } diesel_migrations = { version = "2.0.0", features = ["postgres"] } -bls = { workspace = true } +env_logger = { workspace = true } +eth2 = { workspace = true } +hyper = { workspace = true } +log = { workspace = true } r2d2 = { workspace = true } +rand = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } serde_yaml = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +url = { workspace = true } [dev-dependencies] -tokio-postgres = "0.7.5" -http_api = { workspace = true } beacon_chain = { workspace = true } -network = { workspace = true } -testcontainers = "0.15" -unused_port = { workspace = true } -task_executor = { workspace = true } +http_api = { workspace = true } logging = { workspace = true } +network = { workspace = true } +task_executor = { workspace = true } +testcontainers = "0.15" +tokio-postgres = "0.7.5" +unused_port = { workspace = true } diff --git a/watch/README.md 
b/watch/README.md index 34519e52e5..877cddf234 100644 --- a/watch/README.md +++ b/watch/README.md @@ -39,8 +39,6 @@ diesel database reset --database-url postgres://postgres:postgres@localhost/dev 1. Ensure a synced Lighthouse beacon node with historical states is available at `localhost:5052`. -The smaller the value of `--slots-per-restore-point` the faster beacon.watch -will be able to sync to the beacon node. 1. Run the updater daemon: ``` diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs index b31583c629..7193b0744a 100644 --- a/watch/src/database/mod.rs +++ b/watch/src/database/mod.rs @@ -109,9 +109,9 @@ pub fn get_active_config(conn: &mut PgConn) -> Result, Err .optional()?) } -/// -/// INSERT statements -/// +/* + * INSERT statements + */ /// Inserts a single row into the `canonical_slots` table. /// If `new_slot.beacon_block` is `None`, the value in the row will be `null`. @@ -245,9 +245,9 @@ pub fn insert_batch_validators( Ok(()) } -/// -/// SELECT statements -/// +/* + * SELECT statements + */ /// Selects a single row of the `canonical_slots` table corresponding to a given `slot_query`. pub fn get_canonical_slot( @@ -746,9 +746,9 @@ pub fn count_validators_activated_before_slot( .map_err(Error::Database) } -/// -/// DELETE statements. -/// +/* + * DELETE statements. + */ /// Deletes all rows of the `canonical_slots` table which have `slot` greater than `slot_query`. 
/// diff --git a/wordlist.txt b/wordlist.txt new file mode 100644 index 0000000000..6287366cbc --- /dev/null +++ b/wordlist.txt @@ -0,0 +1,234 @@ +APIs +ARMv +AUR +Backends +Backfilling +Beaconcha +Besu +Broadwell +BIP +BLS +BN +BNs +BTC +BTEC +Casper +CentOS +Chiado +CMake +CoinCashew +Consensys +CORS +CPUs +DBs +DES +DHT +DNS +Dockerhub +DoS +EIP +ENR +Erigon +Esat's +ETH +EthDocker +Ethereum +Ethstaker +Exercism +Extractable +FFG +Geth +Gitcoin +Gnosis +Goerli +Grafana +Holesky +Homebrew +Infura +IPs +IPv +JSON +KeyManager +Kurtosis +LMDB +LLVM +LRU +LTO +Mainnet +MDBX +Merkle +MEV +MSRV +NAT's +Nethermind +NodeJS +NullLogger +PathBuf +PowerShell +PPA +Pre +Proto +PRs +Prysm +QUIC +RasPi +README +RESTful +Reth +RHEL +Ropsten +RPC +Ryzen +Sepolia +Somer +SSD +SSL +SSZ +Styleguide +TCP +Teku +TLS +TODOs +UDP +UI +UPnP +USD +UX +Validator +VC +VCs +VPN +Withdrawable +WSL +YAML +aarch +anonymize +api +attester +backend +backends +backfill +backfilling +beaconcha +bitfield +blockchain +bn +cli +clippy +config +cpu +cryptocurrencies +cryptographic +danksharding +datadir +datadirs +de +decrypt +decrypted +dest +dir +disincentivise +doppelgänger +dropdown +else's +env +eth +ethdo +ethereum +ethstaker +filesystem +frontend +gapped +github +graffitis +gwei +hdiffs +homebrew +hostname +html +http +https +hDiff +implementers +interoperable +io +iowait +jemalloc +json +jwt +kb +keymanager +keypair +keypairs +keystore +keystores +linter +linux +localhost +lossy +macOS +mainnet +makefile +mdBook +mev +misconfiguration +mkcert +namespace +natively +nd +ness +nginx +nitty +oom +orging +orgs +os +paul +pem +performant +pid +pre +pubkey +pubkeys +rc +reimport +resync +roadmap +rustfmt +rustup +schemas +sigmaprime +sigp +slashable +slashings +spec'd +src +stakers +subnet +subnets +systemd +testnet +testnets +th +toml +topologies +tradeoffs +transactional +tweakers +ui +unadvanced +unaggregated +unencrypted +unfinalized +untrusted +uptimes +url +validator +validators +validator's 
+vc +virt +webapp +withdrawable +yaml +yml