diff --git a/.github/forbidden-files.txt b/.github/forbidden-files.txt index ec89bd2e4b..b070067350 100644 --- a/.github/forbidden-files.txt +++ b/.github/forbidden-files.txt @@ -5,3 +5,10 @@ beacon_node/beacon_chain/src/otb_verification_service.rs beacon_node/store/src/partial_beacon_state.rs beacon_node/store/src/consensus_context.rs +beacon_node/beacon_chain/src/block_reward.rs +beacon_node/http_api/src/attestation_performance.rs +beacon_node/http_api/src/block_packing_efficiency.rs +beacon_node/http_api/src/block_rewards.rs +common/eth2/src/lighthouse/attestation_performance.rs +common/eth2/src/lighthouse/block_packing_efficiency.rs +common/eth2/src/lighthouse/block_rewards.rs diff --git a/.github/workflows/docker-reproducible.yml b/.github/workflows/docker-reproducible.yml index f3479e9468..7e46fc691b 100644 --- a/.github/workflows/docker-reproducible.yml +++ b/.github/workflows/docker-reproducible.yml @@ -4,7 +4,6 @@ on: push: branches: - unstable - - stable tags: - v* workflow_dispatch: # allows manual triggering for testing purposes and skips publishing an image @@ -25,9 +24,6 @@ jobs: if [[ "${{ github.ref }}" == refs/tags/* ]]; then # It's a tag (e.g., v1.2.3) VERSION="${GITHUB_REF#refs/tags/}" - elif [[ "${{ github.ref }}" == refs/heads/stable ]]; then - # stable branch -> latest - VERSION="latest" elif [[ "${{ github.ref }}" == refs/heads/unstable ]]; then # unstable branch -> latest-unstable VERSION="latest-unstable" @@ -174,3 +170,14 @@ jobs: ${IMAGE_NAME}:${VERSION}-arm64 docker manifest push ${IMAGE_NAME}:${VERSION} + + # For version tags, also create/update the latest tag to keep stable up to date + # Only create latest tag for proper release versions (e.g. 
v1.2.3, not v1.2.3-alpha) + if [[ "${GITHUB_REF}" == refs/tags/* ]] && [[ "${VERSION}" =~ ^v[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,2}$ ]]; then + docker manifest create \ + ${IMAGE_NAME}:latest \ + ${IMAGE_NAME}:${VERSION}-amd64 \ + ${IMAGE_NAME}:${VERSION}-arm64 + + docker manifest push ${IMAGE_NAME}:latest + fi diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 415f4db0e6..e3f6e5d8b8 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -4,7 +4,6 @@ on: push: branches: - unstable - - stable tags: - v* @@ -28,11 +27,6 @@ jobs: extract-version: runs-on: ubuntu-22.04 steps: - - name: Extract version (if stable) - if: github.event.ref == 'refs/heads/stable' - run: | - echo "VERSION=latest" >> $GITHUB_ENV - echo "VERSION_SUFFIX=" >> $GITHUB_ENV - name: Extract version (if unstable) if: github.event.ref == 'refs/heads/unstable' run: | @@ -159,7 +153,16 @@ jobs: - name: Create and push multiarch manifests run: | + # Create the main tag (versioned for releases, latest-unstable for unstable) docker buildx imagetools create -t ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \ ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \ ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX}; + # For version tags, also create/update the latest tag to keep stable up to date + # Only create latest tag for proper release versions (e.g. 
v1.2.3, not v1.2.3-alpha) + if [[ "${GITHUB_REF}" == refs/tags/* ]] && [[ "${VERSION}" =~ ^v[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,2}$ ]]; then + docker buildx imagetools create -t ${{ github.repository_owner}}/${{ matrix.binary }}:latest \ + ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \ + ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX}; + fi + diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 9992273e0a..308ddcf819 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -38,7 +38,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable @@ -106,7 +106,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable @@ -142,7 +142,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable @@ -185,7 +185,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] 
https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable @@ -227,7 +227,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4cad219c89..f81f75cd8b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -37,6 +37,15 @@ Requests](https://github.com/sigp/lighthouse/pulls) is where code gets reviewed. We use [discord](https://discord.gg/cyAszAh) to chat informally. +### A Note on LLM usage + +We are happy to support contributors who are genuinely engaging with the code base. Our general policy regarding LLM usage: + +- Please refrain from submissions that you haven't thoroughly understood, reviewed, and tested. +- Please disclose if a significant portion of your contribution was AI-generated. +- Descriptions and comments should be made by you. +- We reserve the right to reject any contributions we feel are violating the spirit of open source contribution. 
+ ### General Work-Flow We recommend the following work-flow for contributors: diff --git a/Cargo.lock b/Cargo.lock index fac8f42232..a6dd3332d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 4 [[package]] name = "account_manager" -version = "8.1.1" +version = "8.1.3" dependencies = [ "account_utils", "bls", @@ -408,7 +408,7 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -519,7 +519,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -535,7 +535,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "syn-solidity", "tiny-keccak", ] @@ -552,7 +552,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "syn-solidity", ] @@ -641,7 +641,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -824,7 +824,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -862,7 +862,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -973,7 +973,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "synstructure", ] @@ -985,7 +985,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1064,7 +1064,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1075,7 +1075,7 
@@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1117,7 +1117,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1285,7 +1285,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.1.1" +version = "8.1.3" dependencies = [ "account_utils", "beacon_chain", @@ -1385,7 +1385,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1405,7 +1405,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1548,7 +1548,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.1.1" +version = "8.1.3" dependencies = [ "beacon_node", "bytes", @@ -1588,7 +1588,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1864,7 +1864,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1985,7 +1985,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92ff1dbbda10d495b2c92749c002b2025e0be98f42d1741ecc9ff820d2f04dce" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2097,7 +2097,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b7bf98c48ffa511b14bb3c76202c24a8742cea1efa9570391c5d41373419a09" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2331,7 +2331,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2366,7 +2366,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2379,7 +2379,7 @@ dependencies = [ 
"proc-macro2", "quote", "strsim", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2390,7 +2390,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2401,7 +2401,7 @@ checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ "darling_core 0.23.0", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2440,15 +2440,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" +checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" [[package]] name = "data-encoding-macro" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" +checksum = "8142a83c17aa9461d637e649271eae18bf2edd00e91f2e105df36c3c16355bdb" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2456,12 +2456,12 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" +checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de" dependencies = [ "data-encoding", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2550,7 +2550,7 @@ checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2582,7 +2582,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] 
@@ -2595,7 +2595,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2617,7 +2617,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.111", + "syn 2.0.117", "unicode-xid", ] @@ -2733,7 +2733,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2828,7 +2828,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3051,7 +3051,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3071,7 +3071,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3301,7 +3301,7 @@ dependencies = [ "darling 0.23.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3705,7 +3705,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3966,15 +3966,6 @@ dependencies = [ "hashbrown 0.14.5", ] -[[package]] -name = "hashlink" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" -dependencies = [ - "hashbrown 0.15.5", -] - [[package]] name = "hashlink" version = "0.11.0" @@ -4591,7 +4582,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -4834,7 +4825,6 @@ name = "kzg" version = "0.1.0" dependencies = [ "arbitrary", - "c-kzg", "criterion", "educe", "ethereum_hashing", @@ -4867,7 +4857,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" 
[[package]] name = "lcli" -version = "8.1.1" +version = "8.1.3" dependencies = [ "account_utils", "beacon_chain", @@ -4928,9 +4918,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.178" +version = "0.2.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d" [[package]] name = "libloading" @@ -4966,7 +4956,7 @@ dependencies = [ [[package]] name = "libp2p" version = "0.56.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "bytes", "either", @@ -4997,7 +4987,7 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" version = "0.6.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "libp2p-core", "libp2p-identity", @@ -5007,7 +4997,7 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" version = "0.6.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "libp2p-core", "libp2p-identity", @@ -5017,7 +5007,7 @@ dependencies = [ [[package]] name = "libp2p-core" version = "0.43.2" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "either", "fnv", @@ 
-5040,10 +5030,9 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.44.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +version = "0.45.0" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ - "async-trait", "futures", "hickory-resolver", "libp2p-core", @@ -5056,7 +5045,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.50.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "async-channel 2.5.0", "asynchronous-codec", @@ -5068,7 +5057,7 @@ dependencies = [ "futures", "futures-timer", "getrandom 0.2.16", - "hashlink 0.10.0", + "hashlink 0.11.0", "hex_fmt", "libp2p-core", "libp2p-identity", @@ -5086,7 +5075,7 @@ dependencies = [ [[package]] name = "libp2p-identify" version = "0.47.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "asynchronous-codec", "either", @@ -5126,7 +5115,7 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.48.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "hickory-proto", @@ -5144,7 +5133,7 @@ dependencies = [ [[package]] name = "libp2p-metrics" version = "0.17.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = 
"git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "libp2p-core", @@ -5160,7 +5149,7 @@ dependencies = [ [[package]] name = "libp2p-mplex" version = "0.43.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "asynchronous-codec", "bytes", @@ -5178,7 +5167,7 @@ dependencies = [ [[package]] name = "libp2p-noise" version = "0.46.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "asynchronous-codec", "bytes", @@ -5200,7 +5189,7 @@ dependencies = [ [[package]] name = "libp2p-quic" version = "0.13.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "futures-timer", @@ -5220,14 +5209,14 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.47.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +version = "0.47.1" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "either", "fnv", "futures", "futures-timer", - "hashlink 0.10.0", + "hashlink 0.11.0", "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", @@ -5242,17 +5231,17 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" version = "0.35.1" -source = 
"git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "heck", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] name = "libp2p-tcp" version = "0.44.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "futures-timer", @@ -5267,7 +5256,7 @@ dependencies = [ [[package]] name = "libp2p-tls" version = "0.6.2" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "futures-rustls", @@ -5276,7 +5265,7 @@ dependencies = [ "rcgen", "ring", "rustls 0.23.35", - "rustls-webpki 0.103.8", + "rustls-webpki 0.103.10", "thiserror 2.0.17", "x509-parser", "yasna", @@ -5285,7 +5274,7 @@ dependencies = [ [[package]] name = "libp2p-upnp" version = "0.6.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "futures-timer", @@ -5299,7 +5288,7 @@ dependencies = [ [[package]] name = "libp2p-yamux" version = "0.47.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "either", "futures", @@ -5307,7 +5296,7 @@ dependencies = [ "thiserror 2.0.17", 
"tracing", "yamux 0.12.1", - "yamux 0.13.8", + "yamux 0.13.10", ] [[package]] @@ -5353,7 +5342,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.1.1" +version = "8.1.3" dependencies = [ "account_manager", "account_utils", @@ -5485,7 +5474,7 @@ dependencies = [ [[package]] name = "lighthouse_version" -version = "8.1.1" +version = "8.1.3" dependencies = [ "regex", ] @@ -5625,7 +5614,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -5653,7 +5642,7 @@ checksum = "757aee279b8bdbb9f9e676796fd459e4207a1f986e87886700abf589f5abf771" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -5740,7 +5729,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -5846,7 +5835,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -5858,7 +5847,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -5973,7 +5962,7 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.13.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "bytes", "futures", @@ -6293,7 +6282,7 @@ checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6542,7 +6531,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6642,7 +6631,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", 
] [[package]] @@ -6845,7 +6834,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6887,7 +6876,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6954,7 +6943,7 @@ checksum = "9adf1691c04c0a5ff46ff8f262b58beb07b0dbb61f96f9f54f6cbd82106ed87f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6984,7 +6973,7 @@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -7007,7 +6996,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -7061,7 +7050,7 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-protobuf" version = "0.8.1" -source = "git+https://github.com/sigp/quick-protobuf.git?rev=681f413312404ab6e51f0b46f39b0075c6f4ebfd#681f413312404ab6e51f0b46f39b0075c6f4ebfd" +source = "git+https://github.com/sigp/quick-protobuf.git?rev=87c4ccb9bb2af494de375f5f6c62850badd26304#87c4ccb9bb2af494de375f5f6c62850badd26304" dependencies = [ "byteorder", ] @@ -7069,7 +7058,7 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" version = "0.3.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "asynchronous-codec", "bytes", @@ -7101,9 +7090,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.13" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" dependencies = [ "bytes", "getrandom 0.3.4", @@ -7354,14 +7343,14 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] name = "regex" -version = "1.12.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -7680,7 +7669,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.8", + "rustls-webpki 0.103.10", "subtle", "zeroize", ] @@ -7729,9 +7718,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" dependencies = [ "ring", "rustls-pki-types", @@ -7759,7 +7748,7 @@ dependencies = [ [[package]] name = "rw-stream-sink" version = "0.4.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "pin-project", @@ -7995,7 +7984,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8019,7 +8008,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] 
[[package]] @@ -8062,7 +8051,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8533,7 +8522,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8553,7 +8542,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8579,9 +8568,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.111" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -8597,7 +8586,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8617,7 +8606,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8735,7 +8724,7 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8764,7 +8753,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8775,7 +8764,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8932,9 +8921,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.48.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" dependencies = [ "bytes", 
"libc", @@ -8956,7 +8945,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -9196,7 +9185,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -9299,7 +9288,7 @@ dependencies = [ "darling 0.23.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -9534,7 +9523,7 @@ dependencies = [ [[package]] name = "validator_client" -version = "8.1.1" +version = "8.1.3" dependencies = [ "account_utils", "beacon_node_fallback", @@ -9723,6 +9712,7 @@ version = "0.1.0" dependencies = [ "bls", "eth2", + "futures", "slashing_protection", "types", ] @@ -9899,7 +9889,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "wasm-bindgen-shared", ] @@ -10097,7 +10087,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -10108,7 +10098,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -10520,8 +10510,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.8" -source = "git+https://github.com/sigp/rust-yamux?rev=575b17c0f44f4253079a6bafaa2de74ca1d6dfaa#575b17c0f44f4253079a6bafaa2de74ca1d6dfaa" +version = "0.13.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1991f6690292030e31b0144d73f5e8368936c58e45e7068254f7138b23b00672" dependencies = [ "futures", "log", @@ -10561,7 +10552,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "synstructure", ] @@ -10582,7 +10573,7 @@ checksum = 
"d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -10602,7 +10593,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "synstructure", ] @@ -10624,7 +10615,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -10657,7 +10648,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 667ba1f803..340b650bca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,7 @@ resolver = "2" [workspace.package] edition = "2024" -version = "8.1.1" +version = "8.1.3" [workspace.dependencies] account_utils = { path = "common/account_utils" } @@ -117,9 +117,6 @@ bitvec = "1" bls = { path = "crypto/bls" } byteorder = "1" bytes = "1.11.1" -# Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable -# feature ourselves when desired. 
-c-kzg = { version = "2.1", default-features = false } cargo_metadata = "0.19" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } clap_utils = { path = "common/clap_utils" } @@ -166,20 +163,7 @@ initialized_validators = { path = "validator_client/initialized_validators" } int_to_bytes = { path = "consensus/int_to_bytes" } itertools = "0.14" kzg = { path = "crypto/kzg" } -libp2p = { git = "https://github.com/libp2p/rust-libp2p.git", default-features = false, features = [ - "identify", - "yamux", - "noise", - "dns", - "tcp", - "tokio", - "secp256k1", - "macros", - "metrics", - "quic", - "upnp", - "gossipsub", -] } +libp2p = { git = "https://github.com/libp2p/rust-libp2p.git", default-features = false, features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "secp256k1", "macros", "metrics", "quic", "upnp", "gossipsub"] } libsecp256k1 = "0.7" lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_validator_store = { path = "validator_client/lighthouse_validator_store" } @@ -219,12 +203,7 @@ r2d2 = "0.8" rand = "0.9.0" rayon = "1.7" regex = "1" -reqwest = { version = "0.12", default-features = false, features = [ - "blocking", - "json", - "stream", - "rustls-tls", -] } +reqwest = { version = "0.12", default-features = false, features = ["blocking", "json", "stream", "rustls-tls"] } ring = "0.17" rpds = "0.11" rusqlite = { version = "0.38", features = ["bundled"] } @@ -253,12 +232,7 @@ sysinfo = "0.26" system_health = { path = "common/system_health" } task_executor = { path = "common/task_executor" } tempfile = "3" -tokio = { version = "1", features = [ - "rt-multi-thread", - "sync", - "signal", - "macros", -] } +tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal", "macros"] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.7", features = ["codec", "compat", "time"] } tracing = "0.1.40" @@ -301,5 +275,8 @@ inherits = "release" debug = true [patch.crates-io] 
-quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "681f413312404ab6e51f0b46f39b0075c6f4ebfd" } -yamux = { git = "https://github.com/sigp/rust-yamux", rev = "575b17c0f44f4253079a6bafaa2de74ca1d6dfaa" } +quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "87c4ccb9bb2af494de375f5f6c62850badd26304" } + +[patch."https://github.com/libp2p/rust-libp2p.git"] +libp2p = { git = "https://github.com/sigp/rust-libp2p.git", rev = "defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" } +libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p.git", rev = "defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" } diff --git a/Makefile b/Makefile index 9786c17cc9..599c1a8791 100644 --- a/Makefile +++ b/Makefile @@ -321,8 +321,8 @@ make-ef-tests-nightly: # Verifies that crates compile with fuzzing features enabled arbitrary-fuzz: - cargo check -p state_processing --features arbitrary-fuzz,$(TEST_FEATURES) - cargo check -p slashing_protection --features arbitrary-fuzz,$(TEST_FEATURES) + cargo check -p state_processing --features arbitrary,$(TEST_FEATURES) + cargo check -p slashing_protection --features arbitrary,$(TEST_FEATURES) # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: install-audit audit-CI @@ -331,7 +331,7 @@ install-audit: cargo install --force cargo-audit audit-CI: - cargo audit + cargo audit --ignore RUSTSEC-2026-0049 # Runs cargo deny (check for banned crates, duplicate versions, and source restrictions) deny: install-deny deny-CI diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 8dd50cbc6e..05e6f12554 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "account_manager" version = { workspace = true } -authors = [ - "Paul Hauner ", - "Luke Anderson ", -] +authors = ["Paul Hauner ", "Luke Anderson "] edition = { workspace = true } [dependencies] diff --git 
a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 5352814dd5..ebefa6a451 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "beacon_node" version = { workspace = true } -authors = [ - "Paul Hauner ", - "Age Manning ", "Age Manning ; /// Alias to appease clippy. -type HashBlockTuple = (Hash256, RpcBlock); +type HashBlockTuple = (Hash256, RangeSyncBlock); // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::ZERO; @@ -1138,56 +1138,19 @@ impl BeaconChain { .map_or_else(|| self.get_blobs(block_root), Ok) } - /// Returns the execution payload envelopes at the given roots, if any. - /// - /// Will also check any associated caches. The expected use for this function is *only* for returning blocks requested - /// from P2P peers. - /// - /// ## Errors - /// - /// May return a database error. - #[allow(clippy::type_complexity)] - pub fn get_payload_envelopes_checking_caches( - self: &Arc, - block_roots: Vec, - ) -> Result< - impl Stream< - Item = ( - Hash256, - Arc>>, Error>>, - ), - >, - Error, - > { - Ok(PayloadEnvelopeStreamer::::new( - self.execution_layer.clone(), - self.store.clone(), - self.task_executor.clone(), - CheckCaches::Yes, - )? - .launch_stream(block_roots)) - } - + #[cfg(not(test))] #[allow(clippy::type_complexity)] pub fn get_payload_envelopes( self: &Arc, block_roots: Vec, - ) -> Result< - impl Stream< - Item = ( - Hash256, - Arc>>, Error>>, - ), - >, - Error, + request_source: EnvelopeRequestSource, + ) -> impl Stream< + Item = ( + Hash256, + Arc>>, Error>>, + ), > { - Ok(PayloadEnvelopeStreamer::::new( - self.execution_layer.clone(), - self.store.clone(), - self.task_executor.clone(), - CheckCaches::No, - )? 
- .launch_stream(block_roots)) + launch_payload_envelope_stream(self.clone(), block_roots, request_source) } pub fn get_data_columns_checking_all_caches( @@ -2109,6 +2072,8 @@ impl BeaconChain { // required information. (justified_checkpoint, committee_len) } else { + // We assume that the `Pending` state has the same shufflings as a `Full` state + // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (advanced_state_root, mut state) = self .store .get_advanced_hot_state( @@ -2832,7 +2797,7 @@ impl BeaconChain { /// This method is potentially long-running and should not run on the core executor. pub fn filter_chain_segment( self: &Arc, - chain_segment: Vec>, + chain_segment: Vec>, ) -> Result>, Box> { // This function will never import any blocks. let imported_blocks = vec![]; @@ -2941,7 +2906,7 @@ impl BeaconChain { /// `Self::process_block`. pub async fn process_chain_segment( self: &Arc, - chain_segment: Vec>, + chain_segment: Vec>, notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { for block in chain_segment.iter() { @@ -4135,23 +4100,10 @@ impl BeaconChain { // See https://github.com/sigp/lighthouse/issues/2028 let (_, signed_block, block_data) = signed_block.deconstruct(); - match self.get_blobs_or_columns_store_op(block_root, signed_block.slot(), block_data) { - Ok(Some(blobs_or_columns_store_op)) => { - ops.push(blobs_or_columns_store_op); - } - Ok(None) => {} - Err(e) => { - error!( - msg = "Restoring fork choice from disk", - error = &e, - ?block_root, - "Failed to store data columns into the database" - ); - return Err(self - .handle_import_block_db_write_error(fork_choice) - .err() - .unwrap_or(BlockError::InternalError(e))); - } + if let Some(blobs_or_columns_store_op) = + self.get_blobs_or_columns_store_op(block_root, signed_block.slot(), block_data) + { + ops.push(blobs_or_columns_store_op); } let block = signed_block.message(); @@ -4181,7 +4133,7 @@ impl BeaconChain { // We're declaring the block 
"imported" at this point, since fork choice and the DB know // about it. - let block_time_imported = timestamp_now(); + let block_time_imported = self.slot_clock.now_duration().unwrap_or(Duration::MAX); // compute state proofs for light client updates before inserting the state into the // snapshot cache. @@ -4768,6 +4720,8 @@ impl BeaconChain { if cached_head.head_block_root() == parent_block_root { (Cow::Borrowed(head_state), cached_head.head_state_root()) } else { + // TODO(gloas): this function needs updating to be envelope-aware + // See: https://github.com/sigp/lighthouse/issues/8957 let block = self .get_blinded_block(&parent_block_root)? .ok_or(Error::MissingBeaconBlock(parent_block_root))?; @@ -6567,11 +6521,11 @@ impl BeaconChain { ) -> Result { crate::beacon_proposer_cache::with_proposer_cache( &self.beacon_proposer_cache, - &self.spec, shuffling_decision_block, proposal_epoch, accessor, state_provider, + &self.spec, ) } @@ -6710,6 +6664,8 @@ impl BeaconChain { let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { (state, state_root) } else { + // We assume that the `Pending` state has the same shufflings as a `Full` state + // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (state_root, state) = self .store .get_advanced_hot_state( @@ -6784,6 +6740,9 @@ impl BeaconChain { let mut prev_block_root = None; let mut prev_beacon_state = None; + // Collect all blocks. + let mut blocks = vec![]; + for res in self.forwards_iter_block_roots(from_slot)? { let (beacon_block_root, _) = res?; @@ -6799,16 +6758,42 @@ impl BeaconChain { .ok_or_else(|| { Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) })?; - let beacon_state_root = beacon_block.state_root(); + blocks.push((beacon_block_root, Arc::new(beacon_block))); + } + + // Collect states, using the next blocks to determine if states are full (have Gloas + // payloads). 
+ for (i, (block_root, block)) in blocks.iter().enumerate() { + let (opt_envelope, state_root) = if block.fork_name_unchecked().gloas_enabled() { + let opt_envelope = self.store.get_payload_envelope(block_root)?.map(Arc::new); + + if let Some((_, next_block)) = blocks.get(i + 1) { + let block_hash = block.payload_bid_block_hash()?; + if next_block.is_parent_block_full(block_hash) { + let envelope = opt_envelope.ok_or_else(|| { + Error::DBInconsistent(format!("Missing envelope {block_root:?}")) + })?; + let state_root = envelope.message.state_root; + (Some(envelope), state_root) + } else { + (None, block.state_root()) + } + } else { + // TODO(gloas): should use fork choice/cached head for last block in sequence + opt_envelope + .as_ref() + .map_or((None, block.state_root()), |envelope| { + (Some(envelope.clone()), envelope.message.state_root) + }) + } + } else { + (None, block.state_root()) + }; - // This branch is reached from the HTTP API. We assume the user wants - // to cache states so that future calls are faster. let mut beacon_state = self .store - .get_state(&beacon_state_root, Some(beacon_block.slot()), true)? - .ok_or_else(|| { - Error::DBInconsistent(format!("Missing state {:?}", beacon_state_root)) - })?; + .get_state(&state_root, Some(block.slot()), true)? + .ok_or_else(|| Error::DBInconsistent(format!("Missing state {:?}", state_root)))?; // This beacon state might come from the freezer DB, which means it could have pending // updates or lots of untethered memory. 
We rebase it on the previous state in order to @@ -6821,12 +6806,14 @@ impl BeaconChain { prev_beacon_state = Some(beacon_state.clone()); let snapshot = BeaconSnapshot { - beacon_block: Arc::new(beacon_block), - beacon_block_root, + beacon_block: block.clone(), + execution_envelope: opt_envelope, + beacon_block_root: *block_root, beacon_state, }; dump.push(snapshot); } + Ok(dump) } @@ -7253,16 +7240,16 @@ impl BeaconChain { block_root: Hash256, block_slot: Slot, block_data: AvailableBlockData, - ) -> Result>, String> { + ) -> Option> { match block_data { - AvailableBlockData::NoData => Ok(None), + AvailableBlockData::NoData => None, AvailableBlockData::Blobs(blobs) => { debug!( %block_root, count = blobs.len(), "Writing blobs to store" ); - Ok(Some(StoreOp::PutBlobs(block_root, blobs))) + Some(StoreOp::PutBlobs(block_root, blobs)) } AvailableBlockData::DataColumns(mut data_columns) => { let columns_to_custody = self.custody_columns_for_epoch(Some( @@ -7278,7 +7265,7 @@ impl BeaconChain { count = data_columns.len(), "Writing data columns to store" ); - Ok(Some(StoreOp::PutDataColumns(block_root, data_columns))) + Some(StoreOp::PutDataColumns(block_root, data_columns)) } } } diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 60487f9c46..95fde28f5b 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -231,35 +231,6 @@ where } } - /// Restore `Self` from a previously-generated `PersistedForkChoiceStore`. - /// - /// DEPRECATED. Can be deleted once migrations no longer require it. 
- pub fn from_persisted_v17( - persisted: PersistedForkChoiceStoreV17, - justified_state_root: Hash256, - unrealized_justified_state_root: Hash256, - store: Arc>, - ) -> Result { - let justified_balances = - JustifiedBalances::from_effective_balances(persisted.justified_balances)?; - - Ok(Self { - store, - balances_cache: <_>::default(), - time: persisted.time, - finalized_checkpoint: persisted.finalized_checkpoint, - justified_checkpoint: persisted.justified_checkpoint, - justified_balances, - justified_state_root, - unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, - unrealized_justified_state_root, - unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, - proposer_boost_root: persisted.proposer_boost_root, - equivocating_indices: persisted.equivocating_indices, - _phantom: PhantomData, - }) - } - /// Restore `Self` from a previously-generated `PersistedForkChoiceStore`. pub fn from_persisted( persisted: PersistedForkChoiceStore, @@ -411,45 +382,15 @@ where pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV28; /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. -#[superstruct( - variants(V17, V28), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V28), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoiceStore { - /// The balances cache was removed from disk storage in schema V28. - #[superstruct(only(V17))] - pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, - /// The justified balances were removed from disk storage in schema V28. - #[superstruct(only(V17))] - pub justified_balances: Vec, - /// The justified state root is stored so that it can be used to load the justified balances. 
- #[superstruct(only(V28))] pub justified_state_root: Hash256, pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V28))] pub unrealized_justified_state_root: Hash256, pub unrealized_finalized_checkpoint: Checkpoint, pub proposer_boost_root: Hash256, pub equivocating_indices: BTreeSet, } - -// Convert V28 to V17 by adding balances and removing justified state roots. -impl From<(PersistedForkChoiceStoreV28, JustifiedBalances)> for PersistedForkChoiceStoreV17 { - fn from((v28, balances): (PersistedForkChoiceStoreV28, JustifiedBalances)) -> Self { - Self { - balances_cache: Default::default(), - time: v28.time, - finalized_checkpoint: v28.finalized_checkpoint, - justified_checkpoint: v28.justified_checkpoint, - justified_balances: balances.effective_balances, - unrealized_justified_checkpoint: v28.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: v28.unrealized_finalized_checkpoint, - proposer_boost_root: v28.proposer_boost_root, - equivocating_indices: v28.equivocating_indices, - } - } -} diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 141a79b202..b258d7471f 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -174,11 +174,11 @@ impl BeaconProposerCache { /// cache the proposers. 
pub fn with_proposer_cache( beacon_proposer_cache: &Mutex, - spec: &ChainSpec, shuffling_decision_block: Hash256, proposal_epoch: Epoch, accessor: impl Fn(&EpochBlockProposers) -> Result, state_provider: impl FnOnce() -> Result<(Hash256, BeaconState), Err>, + spec: &ChainSpec, ) -> Result where Spec: EthSpec, diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index e9fde48ac6..566713e3f3 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -2,7 +2,7 @@ use serde::Serialize; use std::sync::Arc; use types::{ AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, SignedBeaconBlock, - SignedBlindedBeaconBlock, + SignedBlindedBeaconBlock, SignedExecutionPayloadEnvelope, }; /// Represents some block and its associated state. Generally, this will be used for tracking the @@ -10,6 +10,7 @@ use types::{ #[derive(Clone, Serialize, PartialEq, Debug)] pub struct BeaconSnapshot = FullPayload> { pub beacon_block: Arc>, + pub execution_envelope: Option>>, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } @@ -31,33 +32,42 @@ impl> BeaconSnapshot { /// Create a new checkpoint. pub fn new( beacon_block: Arc>, + execution_envelope: Option>>, beacon_block_root: Hash256, beacon_state: BeaconState, ) -> Self { Self { beacon_block, + execution_envelope, beacon_block_root, beacon_state, } } - /// Returns the state root from `self.beacon_block`. + /// Returns the state root from `self.beacon_block` or `self.execution_envelope` as + /// appropriate. /// /// ## Caution /// /// It is not strictly enforced that `root(self.beacon_state) == self.beacon_state_root()`. pub fn beacon_state_root(&self) -> Hash256 { - self.beacon_block.message().state_root() + if let Some(ref envelope) = self.execution_envelope { + envelope.message.state_root + } else { + self.beacon_block.message().state_root() + } } /// Update all fields of the checkpoint. 
pub fn update( &mut self, beacon_block: Arc>, + execution_envelope: Option>>, beacon_block_root: Hash256, beacon_state: BeaconState, ) { self.beacon_block = beacon_block; + self.execution_envelope = execution_envelope; self.beacon_block_root = beacon_block_root; self.beacon_state = beacon_state; } diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index c257ba02ec..86b385d818 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -509,6 +509,8 @@ pub fn validate_blob_sidecar_for_gossip = ( - BeaconBlock>, - BeaconState, - ConsensusBlockValue, -); +type BlockProductionResult = (BeaconBlock, BeaconState, ConsensusBlockValue); pub type PreparePayloadResult = Result, BlockProductionError>; pub type PreparePayloadHandle = JoinHandle>>; @@ -429,6 +425,12 @@ impl BeaconChain { )) } + /// Complete a block by computing its state root, and + /// + /// Return `(block, pending_state, block_value)` where: + /// + /// - `pending_state` is the state post block application (prior to payload application) + /// - `block_value` is the consensus-layer rewards for `block` #[allow(clippy::type_complexity)] fn complete_partial_beacon_block_gloas( &self, @@ -761,8 +763,12 @@ fn get_execution_payload_gloas( let latest_execution_block_hash = *state.latest_block_hash()?; let latest_gas_limit = state.latest_execution_payload_bid()?.gas_limit; - let withdrawals = - Withdrawals::::from(get_expected_withdrawals(state, spec)?).into(); + let withdrawals = if state.is_parent_block_full() { + Withdrawals::::from(get_expected_withdrawals(state, spec)?).into() + } else { + // If the previous payload was missed, carry forward the withdrawals from the state. + state.payload_expected_withdrawals()?.to_vec() + }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The // `join_handle` can be used to await the result of the function. 
diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs deleted file mode 100644 index f3924bb473..0000000000 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ /dev/null @@ -1,140 +0,0 @@ -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; -use operation_pool::{ - AttMaxCover, MaxCover, PROPOSER_REWARD_DENOMINATOR, RewardCache, SplitAttestation, -}; -use state_processing::{ - common::get_attesting_indices_from_state, - per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards, -}; -use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, EthSpec, Hash256}; - -impl BeaconChain { - pub fn compute_block_reward>( - &self, - block: BeaconBlockRef<'_, T::EthSpec, Payload>, - block_root: Hash256, - state: &BeaconState, - reward_cache: &mut RewardCache, - include_attestations: bool, - ) -> Result { - if block.slot() != state.slot() { - return Err(BeaconChainError::BlockRewardSlotError); - } - - reward_cache.update(state)?; - - let total_active_balance = state.get_total_active_balance()?; - - let split_attestations = block - .body() - .attestations() - .map(|att| { - let attesting_indices = get_attesting_indices_from_state(state, att)?; - Ok(SplitAttestation::new( - att.clone_as_attestation(), - attesting_indices, - )) - }) - .collect::, BeaconChainError>>()?; - - let mut per_attestation_rewards = split_attestations - .iter() - .map(|att| { - AttMaxCover::new( - att.as_ref(), - state, - reward_cache, - total_active_balance, - &self.spec, - ) - .ok_or(BeaconChainError::BlockRewardAttestationError) - }) - .collect::, _>>()?; - - // Update the attestation rewards for each previous attestation included. - // This is O(n^2) in the number of attestations n. 
- for i in 0..per_attestation_rewards.len() { - let (updated, to_update) = per_attestation_rewards.split_at_mut(i + 1); - let latest_att = &updated[i]; - - for att in to_update { - att.update_covering_set(latest_att.intermediate(), latest_att.covering_set()); - } - } - - let mut prev_epoch_total = 0; - let mut curr_epoch_total = 0; - - for cover in &per_attestation_rewards { - if cover.att.data.slot.epoch(T::EthSpec::slots_per_epoch()) == state.current_epoch() { - curr_epoch_total += cover.score() as u64; - } else { - prev_epoch_total += cover.score() as u64; - } - } - - let attestation_total = prev_epoch_total + curr_epoch_total; - - // Drop the covers. - let per_attestation_rewards = per_attestation_rewards - .into_iter() - .map(|cover| { - // Divide each reward numerator by the denominator. This can lead to the total being - // less than the sum of the individual rewards due to the fact that integer division - // does not distribute over addition. - let mut rewards = cover.fresh_validators_rewards; - rewards - .values_mut() - .for_each(|reward| *reward /= PROPOSER_REWARD_DENOMINATOR); - rewards - }) - .collect(); - - // Add the attestation data if desired. - let attestations = if include_attestations { - block - .body() - .attestations() - .map(|a| a.data().clone()) - .collect() - } else { - vec![] - }; - - let attestation_rewards = AttestationRewards { - total: attestation_total, - prev_epoch_total, - curr_epoch_total, - per_attestation_rewards, - attestations, - }; - - // Sync committee rewards. 
- let sync_committee_rewards = if let Ok(sync_aggregate) = block.body().sync_aggregate() { - let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec) - .map_err(|_| BeaconChainError::BlockRewardSyncError)?; - sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit - } else { - 0 - }; - - // Total, metadata - let total = attestation_total + sync_committee_rewards; - - let meta = BlockRewardMeta { - slot: block.slot(), - parent_slot: state.latest_block_header().slot, - proposer_index: block.proposer_index(), - graffiti: block.body().graffiti().as_utf8_lossy(), - }; - - Ok(BlockReward { - total, - block_root, - meta, - attestation_rewards, - sync_committee_rewards, - }) - } -} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 6109537bff..324f50d74c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -50,7 +50,7 @@ use crate::beacon_snapshot::PreProcessingSnapshot; use crate::blob_verification::GossipBlobError; -use crate::block_verification_types::{AsBlock, BlockImportData, RpcBlock}; +use crate::block_verification_types::{AsBlock, BlockImportData, LookupBlock, RangeSyncBlock}; use crate::data_availability_checker::{ AvailabilityCheckError, AvailableBlock, AvailableBlockData, MaybeAvailableBlock, }; @@ -592,7 +592,7 @@ pub(crate) fn process_block_slash_info( - mut chain_segment: Vec<(Hash256, RpcBlock)>, + mut chain_segment: Vec<(Hash256, RangeSyncBlock)>, chain: &BeaconChain, ) -> Result>, BlockError> { if chain_segment.is_empty() { @@ -623,24 +623,14 @@ pub fn signature_verify_chain_segment( let consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(block_root); - match block { - RpcBlock::FullyAvailable(available_block) => { - available_blocks.push(available_block.clone()); - signature_verified_blocks.push(SignatureVerifiedBlock { - block: 
MaybeAvailableBlock::Available(available_block), - block_root, - parent: None, - consensus_context, - }); - } - RpcBlock::BlockOnly { .. } => { - // RangeSync and BackfillSync already ensure that the chain segment is fully available - // so this shouldn't be possible in practice. - return Err(BlockError::InternalError( - "Chain segment is not fully available".to_string(), - )); - } - } + let available_block = block.into_available_block(); + available_blocks.push(available_block.clone()); + signature_verified_blocks.push(SignatureVerifiedBlock { + block: MaybeAvailableBlock::Available(available_block), + block_root, + parent: None, + consensus_context, + }); } chain @@ -1315,11 +1305,11 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc } } -impl IntoExecutionPendingBlock for RpcBlock { +impl IntoExecutionPendingBlock for RangeSyncBlock { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. 
#[instrument( - name = "rpc_block_into_execution_pending_block_slashable", + name = "range_sync_block_into_execution_pending_block_slashable", level = "debug" skip_all, )] @@ -1333,24 +1323,51 @@ impl IntoExecutionPendingBlock for RpcBlock let block_root = check_block_relevancy(self.as_block(), block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; - let maybe_available_block = match &self { - RpcBlock::FullyAvailable(available_block) => { - chain - .data_availability_checker - .verify_kzg_for_available_block(available_block) - .map_err(|e| { - BlockSlashInfo::SignatureNotChecked( - self.signed_block_header(), - BlockError::AvailabilityCheck(e), - ) - })?; - MaybeAvailableBlock::Available(available_block.clone()) - } - // No need to perform KZG verification unless we have a fully available block - RpcBlock::BlockOnly { block, block_root } => MaybeAvailableBlock::AvailabilityPending { - block_root: *block_root, - block: block.clone(), - }, + let available_block = self.into_available_block(); + chain + .data_availability_checker + .verify_kzg_for_available_block(&available_block) + .map_err(|e| { + BlockSlashInfo::SignatureNotChecked( + available_block.as_block().signed_block_header(), + BlockError::AvailabilityCheck(e), + ) + })?; + let maybe_available_block = MaybeAvailableBlock::Available(available_block); + SignatureVerifiedBlock::check_slashable(maybe_available_block, block_root, chain)? + .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) + } + + fn block(&self) -> &SignedBeaconBlock { + self.as_block() + } + + fn block_cloned(&self) -> Arc> { + self.block_cloned() + } +} + +impl IntoExecutionPendingBlock for LookupBlock { + /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` + /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. 
+ #[instrument( + name = "lookup_block_into_execution_pending_block_slashable", + level = "debug" + skip_all, + )] + fn into_execution_pending_block_slashable( + self, + block_root: Hash256, + chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result, BlockSlashInfo> { + // Perform an early check to prevent wasting time on irrelevant blocks. + let block_root = check_block_relevancy(self.as_block(), block_root, chain) + .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; + + let maybe_available_block = MaybeAvailableBlock::AvailabilityPending { + block_root, + block: self.block_cloned(), }; SignatureVerifiedBlock::check_slashable(maybe_available_block, block_root, chain)? @@ -1374,7 +1391,7 @@ impl ExecutionPendingBlock { /// verification must be done upstream (e.g., via a `SignatureVerifiedBlock` /// /// Returns an error if the block is invalid, or if the block was unable to be verified. - #[instrument(skip_all, level = "debug")] + #[instrument(skip_all, level = "debug", fields(?block_root))] pub fn from_signature_verified_components( block: MaybeAvailableBlock, block_root: Hash256, @@ -1592,24 +1609,6 @@ impl ExecutionPendingBlock { metrics::stop_timer(committee_timer); - /* - * If we have block reward listeners, compute the block reward and push it to the - * event handler. - */ - if let Some(ref event_handler) = chain.event_handler - && event_handler.has_block_reward_subscribers() - { - let mut reward_cache = Default::default(); - let block_reward = chain.compute_block_reward( - block.message(), - block_root, - &state, - &mut reward_cache, - true, - )?; - event_handler.register(EventKind::BlockReward(block_reward)); - } - /* * Perform `per_block_processing` on the block and state, returning early if the block is * invalid. 
@@ -1955,13 +1954,13 @@ fn load_parent>( && let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() { if block.as_block().is_parent_block_full(parent_bid_block_hash) { - // The parent block's envelope must have been imported for us to load the - // full state. If it hasn't arrived yet, return an unknown parent error so - // the block gets sent to the reprocess queue. - let envelope = chain - .store - .get_payload_envelope(&root)? - .ok_or(BlockError::ParentEnvelopeUnknown { parent_root: root })?; + // TODO(gloas): loading the envelope here is not very efficient + // TODO(gloas): check parent payload existence prior to this point? + let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing envelope for parent block {root:?}", + )) + })?; (StatePayloadStatus::Full, envelope.message.state_root) } else { (StatePayloadStatus::Pending, parent_block.state_root()) diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index f98cd40d08..be73ef15d7 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -13,76 +13,70 @@ use types::{ SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -/// A block that has been received over RPC. It has 2 internal variants: -/// -/// 1. `FullyAvailable`: A fully available block. This can either be a pre-deneb block, a -/// post-Deneb block with blobs, a post-Fulu block with the columns the node is required to custody, -/// or a post-Deneb block that doesn't require blobs/columns. Hence, it is fully self contained w.r.t -/// verification. i.e. this block has all the required data to get verified and imported into fork choice. -/// -/// 2. `BlockOnly`: This is a post-deneb block that requires blobs to be considered fully available. 
-#[derive(Clone, Educe)] -#[educe(Hash(bound(E: EthSpec)))] -pub enum RpcBlock { - FullyAvailable(AvailableBlock), - BlockOnly { - block: Arc>, - block_root: Hash256, - }, +/// A wrapper around a `SignedBeaconBlock`. This variant is constructed +/// when lookup sync only fetches a single block. It does not contain +/// any blobs or data columns. +pub struct LookupBlock { + block: Arc>, + block_root: Hash256, } -impl Debug for RpcBlock { +impl LookupBlock { + pub fn new(block: Arc>) -> Self { + let block_root = block.canonical_root(); + Self { block, block_root } + } + + pub fn block(&self) -> &SignedBeaconBlock { + &self.block + } + + pub fn block_root(&self) -> Hash256 { + self.block_root + } + + pub fn block_cloned(&self) -> Arc> { + self.block.clone() + } +} + +/// A fully available block that has been constructed by range sync. +/// The block contains all the data required to import into fork choice. +/// This includes any and all blobs/columns required, including zero if +/// none are required. This can happen if the block is pre-deneb or if +/// it's simply past the DA boundary. +#[derive(Clone, Educe)] +#[educe(Hash(bound(E: EthSpec)))] +pub struct RangeSyncBlock { + block: AvailableBlock, +} + +impl Debug for RangeSyncBlock { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "RpcBlock({:?})", self.block_root()) } } -impl RpcBlock { +impl RangeSyncBlock { pub fn block_root(&self) -> Hash256 { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block_root(), - RpcBlock::BlockOnly { block_root, .. } => *block_root, - } + self.block.block_root() } pub fn as_block(&self) -> &SignedBeaconBlock { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block(), - RpcBlock::BlockOnly { block, .. } => block, - } + self.block.block() } pub fn block_cloned(&self) -> Arc> { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block_cloned(), - RpcBlock::BlockOnly { block, .. 
} => block.clone(), - } + self.block.block_cloned() } - pub fn block_data(&self) -> Option<&AvailableBlockData> { - match self { - RpcBlock::FullyAvailable(available_block) => Some(available_block.data()), - RpcBlock::BlockOnly { .. } => None, - } + pub fn block_data(&self) -> &AvailableBlockData { + self.block.data() } } -impl RpcBlock { - /// Constructs an `RpcBlock` from a block and optional availability data. - /// - /// This function creates an RpcBlock which can be in one of two states: - /// - `FullyAvailable`: When `block_data` is provided, the block contains all required - /// data for verification. - /// - `BlockOnly`: When `block_data` is `None`, the block may still need additional - /// data to be considered fully available (used during block lookups or when blobs - /// will arrive separately). - /// - /// # Validation - /// - /// When `block_data` is provided, this function validates that: - /// - Block data is not provided when not required. - /// - Required blobs are present and match the expected count. - /// - Required custody columns are included based on the nodes custody requirements. +impl RangeSyncBlock { + /// Constructs an `RangeSyncBlock` from a block and availability data. /// /// # Errors /// @@ -92,62 +86,41 @@ impl RpcBlock { /// - `MissingCustodyColumns`: Block requires custody columns but they are incomplete. 
pub fn new( block: Arc>, - block_data: Option>, + block_data: AvailableBlockData, da_checker: &DataAvailabilityChecker, spec: Arc, ) -> Result where T: BeaconChainTypes, { - match block_data { - Some(block_data) => Ok(RpcBlock::FullyAvailable(AvailableBlock::new( - block, block_data, da_checker, spec, - )?)), - None => Ok(RpcBlock::BlockOnly { - block_root: block.canonical_root(), - block, - }), - } + let available_block = AvailableBlock::new(block, block_data, da_checker, spec)?; + Ok(Self { + block: available_block, + }) } #[allow(clippy::type_complexity)] - pub fn deconstruct( - self, - ) -> ( - Hash256, - Arc>, - Option>, - ) { - match self { - RpcBlock::FullyAvailable(available_block) => { - let (block_root, block, block_data) = available_block.deconstruct(); - (block_root, block, Some(block_data)) - } - RpcBlock::BlockOnly { block, block_root } => (block_root, block, None), - } + pub fn deconstruct(self) -> (Hash256, Arc>, AvailableBlockData) { + self.block.deconstruct() } pub fn n_blobs(&self) -> usize { - if let Some(block_data) = self.block_data() { - match block_data { - AvailableBlockData::NoData | AvailableBlockData::DataColumns(_) => 0, - AvailableBlockData::Blobs(blobs) => blobs.len(), - } - } else { - 0 + match self.block_data() { + AvailableBlockData::NoData | AvailableBlockData::DataColumns(_) => 0, + AvailableBlockData::Blobs(blobs) => blobs.len(), } } pub fn n_data_columns(&self) -> usize { - if let Some(block_data) = self.block_data() { - match block_data { - AvailableBlockData::NoData | AvailableBlockData::Blobs(_) => 0, - AvailableBlockData::DataColumns(columns) => columns.len(), - } - } else { - 0 + match self.block_data() { + AvailableBlockData::NoData | AvailableBlockData::Blobs(_) => 0, + AvailableBlockData::DataColumns(columns) => columns.len(), } } + + pub fn into_available_block(self) -> AvailableBlock { + self.block + } } /// A block that has gone through all pre-deneb block processing checks including block processing @@ -412,7 +385,7 
@@ impl AsBlock for AvailableBlock { } } -impl AsBlock for RpcBlock { +impl AsBlock for RangeSyncBlock { fn slot(&self) -> Slot { self.as_block().slot() } @@ -432,24 +405,42 @@ impl AsBlock for RpcBlock { self.as_block().message() } fn as_block(&self) -> &SignedBeaconBlock { - match self { - Self::BlockOnly { - block, - block_root: _, - } => block, - Self::FullyAvailable(available_block) => available_block.block(), - } + self.block.as_block() } fn block_cloned(&self) -> Arc> { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block_cloned(), - RpcBlock::BlockOnly { - block, - block_root: _, - } => block.clone(), - } + self.block.block_cloned() } fn canonical_root(&self) -> Hash256 { - self.as_block().canonical_root() + self.block.block_root() + } +} + +impl AsBlock for LookupBlock { + fn slot(&self) -> Slot { + self.block().slot() + } + fn epoch(&self) -> Epoch { + self.block().epoch() + } + fn parent_root(&self) -> Hash256 { + self.block().parent_root() + } + fn state_root(&self) -> Hash256 { + self.block().state_root() + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.block().signed_block_header() + } + fn message(&self) -> BeaconBlockRef<'_, E> { + self.block().message() + } + fn as_block(&self) -> &SignedBeaconBlock { + self.block() + } + fn block_cloned(&self) -> Arc> { + self.block_cloned() + } + fn canonical_root(&self) -> Hash256 { + self.block_root } } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 59fa5ec9ec..7eb92060a2 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -358,6 +358,7 @@ where Ok(( BeaconSnapshot { beacon_block_root, + execution_envelope: None, beacon_block: Arc::new(beacon_block), beacon_state, }, @@ -616,8 +617,10 @@ where .map_err(|e| format!("Failed to initialize data column info: {:?}", e))?, ); + // TODO(gloas): add check that checkpoint state is Pending let snapshot = 
BeaconSnapshot { beacon_block_root: weak_subj_block_root, + execution_envelope: None, beacon_block: Arc::new(weak_subj_block), beacon_state: weak_subj_state, }; @@ -800,6 +803,7 @@ where let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, + execution_envelope: None, beacon_block: Arc::new(head_block), beacon_state: head_state, }; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index fd060e2b59..3a429bdb8a 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -319,6 +319,7 @@ impl CanonicalHead { let snapshot = BeaconSnapshot { beacon_block_root, + execution_envelope: None, beacon_block: Arc::new(beacon_block), beacon_state, }; @@ -370,6 +371,13 @@ impl CanonicalHead { Ok((head, execution_status)) } + // TODO(gloas) just a stub for now, implement this once we have fork choice. + /// Returns true if the payload for this block is canonical according to fork choice + /// Returns an error if the block root doesn't exist in fork choice. + pub fn block_has_canonical_payload(&self, _root: &Hash256) -> Result { + Ok(true) + } + /// Returns a clone of `self.cached_head`. /// /// Takes a read-lock on `self.cached_head` for a short time (just long enough to clone it). 
@@ -695,6 +703,7 @@ impl BeaconChain { BeaconSnapshot { beacon_block: Arc::new(beacon_block), + execution_envelope: None, beacon_block_root: new_view.head_block_root, beacon_state, } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index e266e02f7f..4372efa809 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -891,7 +891,7 @@ impl MaybeAvailableBlock { mod test { use super::*; use crate::CustodyContext; - use crate::block_verification_types::RpcBlock; + use crate::block_verification_types::RangeSyncBlock; use crate::custody_context::NodeCustodyType; use crate::data_column_verification::CustodyDataColumn; use crate::test_utils::{ @@ -1085,7 +1085,7 @@ mod test { /// Regression test for KZG verification truncation bug (https://github.com/sigp/lighthouse/pull/7927) #[test] - fn verify_kzg_for_rpc_blocks_should_not_truncate_data_columns_fulu() { + fn verify_kzg_for_range_sync_blocks_should_not_truncate_data_columns_fulu() { let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let da_checker = new_da_checker(spec.clone()); @@ -1128,17 +1128,14 @@ mod test { let block_data = AvailableBlockData::new_with_data_columns(custody_columns); let da_checker = Arc::new(new_da_checker(spec.clone())); - RpcBlock::new(Arc::new(block), Some(block_data), &da_checker, spec.clone()) + RangeSyncBlock::new(Arc::new(block), block_data, &da_checker, spec.clone()) .expect("should create RPC block with custody columns") }) .collect::>(); let available_blocks = blocks_with_columns - .iter() - .filter_map(|block| match block { - RpcBlock::FullyAvailable(available_block) => Some(available_block.clone()), - RpcBlock::BlockOnly { .. 
} => None, - }) + .into_iter() + .map(|block| block.into_available_block()) .collect::>(); // WHEN verifying all blocks together (totalling 256 data columns) diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 9ae9213a70..dde9fad342 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -706,6 +706,8 @@ fn verify_proposer_and_signature( index = %column_index, "Proposer shuffling cache miss for column verification" ); + // We assume that the `Pending` state has the same shufflings as a `Full` state + // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root chain .store .get_advanced_hot_state( diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index d881a0edb9..9362aa0765 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -8,6 +8,7 @@ use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use crate::observed_data_sidecars::Error as ObservedDataSidecarsError; +use crate::payload_envelope_streamer::Error as EnvelopeStreamerError; use bls::PublicKeyBytes; use execution_layer::PayloadStatus; use fork_choice::ExecutionStatus; @@ -157,6 +158,7 @@ pub enum BeaconChainError { reconstructed_transactions_root: Hash256, }, BlockStreamerError(BlockStreamerError), + EnvelopeStreamerError(EnvelopeStreamerError), AddPayloadLogicError, ExecutionForkChoiceUpdateFailed(execution_layer::Error), PrepareProposerFailed(BlockProcessingError), diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index fbe8cc3ef9..9b3a3eae0f 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ 
-21,7 +21,6 @@ pub struct ServerSentEventHandler { late_head: Sender>, light_client_finality_update_tx: Sender>, light_client_optimistic_update_tx: Sender>, - block_reward_tx: Sender>, proposer_slashing_tx: Sender>, attester_slashing_tx: Sender>, bls_to_execution_change_tx: Sender>, @@ -50,7 +49,6 @@ impl ServerSentEventHandler { let (late_head, _) = broadcast::channel(capacity); let (light_client_finality_update_tx, _) = broadcast::channel(capacity); let (light_client_optimistic_update_tx, _) = broadcast::channel(capacity); - let (block_reward_tx, _) = broadcast::channel(capacity); let (proposer_slashing_tx, _) = broadcast::channel(capacity); let (attester_slashing_tx, _) = broadcast::channel(capacity); let (bls_to_execution_change_tx, _) = broadcast::channel(capacity); @@ -73,7 +71,6 @@ impl ServerSentEventHandler { late_head, light_client_finality_update_tx, light_client_optimistic_update_tx, - block_reward_tx, proposer_slashing_tx, attester_slashing_tx, bls_to_execution_change_tx, @@ -148,10 +145,6 @@ impl ServerSentEventHandler { .light_client_optimistic_update_tx .send(kind) .map(|count| log_count("light client optimistic update", count)), - EventKind::BlockReward(_) => self - .block_reward_tx - .send(kind) - .map(|count| log_count("block reward", count)), EventKind::ProposerSlashing(_) => self .proposer_slashing_tx .send(kind) @@ -238,10 +231,6 @@ impl ServerSentEventHandler { self.light_client_optimistic_update_tx.subscribe() } - pub fn subscribe_block_reward(&self) -> Receiver> { - self.block_reward_tx.subscribe() - } - pub fn subscribe_attester_slashing(&self) -> Receiver> { self.attester_slashing_tx.subscribe() } @@ -306,10 +295,6 @@ impl ServerSentEventHandler { self.late_head.receiver_count() > 0 } - pub fn has_block_reward_subscribers(&self) -> bool { - self.block_reward_tx.receiver_count() > 0 - } - pub fn has_proposer_slashing_subscribers(&self) -> bool { self.proposer_slashing_tx.receiver_count() > 0 } diff --git 
a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 522cd7dc11..f321ea9f1b 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -25,7 +25,6 @@ use state_processing::per_block_processing::{ use std::sync::Arc; use tokio::task::JoinHandle; use tracing::{Instrument, debug_span, warn}; -use tree_hash::TreeHash; use types::execution::BlockProductionVersion; use types::*; @@ -111,7 +110,6 @@ impl PayloadNotifier { } else { notify_new_payload( &self.chain, - self.block.message().tree_hash_root(), self.block.message().slot(), self.block.message().parent_root(), self.block.message().try_into()?, @@ -121,7 +119,7 @@ impl PayloadNotifier { } } -/// Verify that `execution_payload` associated with `beacon_block_root` is considered valid by an execution +/// Verify that `execution_payload` is considered valid by an execution /// engine. /// /// ## Specification @@ -132,7 +130,6 @@ impl PayloadNotifier { /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload pub async fn notify_new_payload( chain: &Arc>, - beacon_block_root: Hash256, slot: Slot, parent_beacon_block_root: Hash256, new_payload_request: NewPayloadRequest<'_, T::EthSpec>, @@ -161,7 +158,6 @@ pub async fn notify_new_payload( ?validation_error, ?latest_valid_hash, ?execution_block_hash, - root = ?beacon_block_root, %slot, method = "new_payload", "Invalid execution payload" @@ -202,7 +198,6 @@ pub async fn notify_new_payload( warn!( ?validation_error, ?execution_block_hash, - root = ?beacon_block_root, %slot, method = "new_payload", "Invalid execution payload block hash" diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1dae2258f6..bfda52558e 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -165,13 
+165,8 @@ impl BeaconChain { } // Store the blobs or data columns too - if let Some(op) = self - .get_blobs_or_columns_store_op(block_root, block.slot(), block_data) - .map_err(|e| { - HistoricalBlockError::StoreError(StoreError::DBError { - message: format!("get_blobs_or_columns_store_op error {e:?}"), - }) - })? + if let Some(op) = + self.get_blobs_or_columns_store_op(block_root, block.slot(), block_data) { blob_batch.extend(self.store.convert_to_kv_batch(vec![op])?); } diff --git a/beacon_node/beacon_chain/src/invariants.rs b/beacon_node/beacon_chain/src/invariants.rs new file mode 100644 index 0000000000..7bcec7b0b4 --- /dev/null +++ b/beacon_node/beacon_chain/src/invariants.rs @@ -0,0 +1,56 @@ +//! Beacon chain database invariant checks. +//! +//! Builds the `InvariantContext` from beacon chain state and delegates all checks +//! to `HotColdDB::check_invariants`. + +use crate::BeaconChain; +use crate::beacon_chain::BeaconChainTypes; +use store::invariants::{InvariantCheckResult, InvariantContext}; + +impl BeaconChain { + /// Run all database invariant checks. + /// + /// Collects context from fork choice, state cache, custody columns, and pubkey cache, + /// then delegates to the store-level `check_invariants` method. + pub fn check_database_invariants(&self) -> Result { + let fork_choice_blocks = { + let fc = self.canonical_head.fork_choice_read_lock(); + let proto_array = fc.proto_array().core_proto_array(); + proto_array + .nodes + .iter() + .filter(|node| { + // Only check blocks that are descendants of the finalized checkpoint. + // Pruned non-canonical fork blocks may linger in the proto-array but + // are legitimately absent from the database. 
+ fc.is_finalized_checkpoint_or_descendant(node.root) + }) + .map(|node| (node.root, node.slot)) + .collect() + }; + + let custody_context = self.data_availability_checker.custody_context(); + + let ctx = InvariantContext { + fork_choice_blocks, + state_cache_roots: self.store.state_cache.lock().state_roots(), + custody_columns: custody_context + .custody_columns_for_epoch(None, &self.spec) + .to_vec(), + pubkey_cache_pubkeys: { + let cache = self.validator_pubkey_cache.read(); + (0..cache.len()) + .filter_map(|i| { + cache.get(i).map(|pk| { + use store::StoreItem; + crate::validator_pubkey_cache::DatabasePubkey::from_pubkey(pk) + .as_store_bytes() + }) + }) + .collect() + }, + }; + + self.store.check_invariants(&ctx) + } +} diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 33b3260361..10cb208729 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -1,6 +1,5 @@ use kzg::{ - Blob as KzgBlob, Bytes48, Cell as KzgCell, CellRef as KzgCellRef, CellsAndKzgProofs, - Error as KzgError, Kzg, KzgBlobRef, + Cell as KzgCell, CellRef as KzgCellRef, CellsAndKzgProofs, Error as KzgError, Kzg, KzgBlobRef, }; use rayon::prelude::*; use ssz_types::{FixedVector, VariableList}; @@ -15,18 +14,18 @@ use types::{ SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlindedBeaconBlock, Slot, }; -/// Converts a blob ssz List object to an array to be used with the kzg -/// crypto library. -fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result { - KzgBlob::from_bytes(blob.as_ref()).map_err(Into::into) +/// Converts a blob ssz FixedVector to a reference to a fixed-size array +/// to be used with `rust_eth_kzg`. 
+fn ssz_blob_to_kzg_blob_ref(blob: &Blob) -> Result, KzgError> { + blob.as_ref().try_into().map_err(|e| { + KzgError::InconsistentArrayLength(format!( + "blob should have a guaranteed size due to FixedVector: {e:?}" + )) + }) } -fn ssz_blob_to_crypto_blob_boxed(blob: &Blob) -> Result, KzgError> { - ssz_blob_to_crypto_blob::(blob).map(Box::new) -} - -/// Converts a cell ssz List object to an array to be used with the kzg -/// crypto library. +/// Converts a cell ssz FixedVector to a reference to a fixed-size array +/// to be used with `rust_eth_kzg`. fn ssz_cell_to_crypto_cell(cell: &Cell) -> Result, KzgError> { let cell_bytes: &[u8] = cell.as_ref(); cell_bytes @@ -42,8 +41,8 @@ pub fn validate_blob( kzg_proof: KzgProof, ) -> Result<(), KzgError> { let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); - let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; - kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) + let kzg_blob = ssz_blob_to_kzg_blob_ref::(blob)?; + kzg.verify_blob_kzg_proof(kzg_blob, kzg_commitment, kzg_proof) } /// Validate a batch of `DataColumnSidecar`. @@ -72,7 +71,7 @@ where } for &proof in data_column.kzg_proofs() { - proofs.push(Bytes48::from(proof)); + proofs.push(proof.0); } // In Gloas, commitments come from the block's ExecutionPayloadBid, not the sidecar. 
@@ -90,7 +89,7 @@ where }; for &commitment in kzg_commitments.iter() { - commitments.push(Bytes48::from(commitment)); + commitments.push(commitment.0); } let expected_len = column_indices.len(); @@ -120,7 +119,7 @@ pub fn validate_blobs( let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); let blobs = blobs .into_iter() - .map(|blob| ssz_blob_to_crypto_blob::(blob)) + .map(|blob| ssz_blob_to_kzg_blob_ref::(blob)) .collect::, KzgError>>()?; kzg.verify_blob_kzg_proof_batch(&blobs, expected_kzg_commitments, kzg_proofs) @@ -132,8 +131,8 @@ pub fn compute_blob_kzg_proof( blob: &Blob, kzg_commitment: KzgCommitment, ) -> Result { - let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; - kzg.compute_blob_kzg_proof(&kzg_blob, kzg_commitment) + let kzg_blob = ssz_blob_to_kzg_blob_ref::(blob)?; + kzg.compute_blob_kzg_proof(kzg_blob, kzg_commitment) } /// Compute the kzg commitment for a given blob. @@ -141,8 +140,8 @@ pub fn blob_to_kzg_commitment( kzg: &Kzg, blob: &Blob, ) -> Result { - let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; - kzg.blob_to_kzg_commitment(&kzg_blob) + let kzg_blob = ssz_blob_to_kzg_blob_ref::(blob)?; + kzg.blob_to_kzg_commitment(kzg_blob) } /// Compute the kzg proof for a given blob and an evaluation point z. 
@@ -151,10 +150,9 @@ pub fn compute_kzg_proof( blob: &Blob, z: Hash256, ) -> Result<(KzgProof, Hash256), KzgError> { - let z = z.0.into(); - let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; - kzg.compute_kzg_proof(&kzg_blob, &z) - .map(|(proof, z)| (proof, Hash256::from_slice(&z.to_vec()))) + let kzg_blob = ssz_blob_to_kzg_blob_ref::(blob)?; + kzg.compute_kzg_proof(kzg_blob, &z.0) + .map(|(proof, z)| (proof, Hash256::from_slice(&z))) } /// Verify a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` @@ -165,7 +163,7 @@ pub fn verify_kzg_proof( z: Hash256, y: Hash256, ) -> Result { - kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof) + kzg.verify_kzg_proof(kzg_commitment, &z.0, &y.0, kzg_proof) } /// Build data column sidecars from a signed beacon block and its blobs. diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 312598c72e..fb23bc706c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -10,7 +10,6 @@ mod beacon_snapshot; pub mod bellatrix_readiness; pub mod blob_verification; mod block_production; -pub mod block_reward; mod block_times_cache; mod block_verification; pub mod block_verification_types; @@ -31,6 +30,7 @@ pub mod fork_choice_signal; pub mod graffiti_calculator; pub mod historical_blocks; pub mod historical_data_columns; +pub mod invariants; pub mod kzg_utils; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; @@ -44,6 +44,7 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; +pub mod payload_envelope_streamer; pub mod payload_envelope_verification; pub mod pending_payload_envelopes; pub mod persisted_beacon_chain; @@ -75,7 +76,7 @@ pub use self::errors::{BeaconChainError, BlockProductionError}; pub use self::historical_blocks::HistoricalBlockError; pub use 
attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{ - BeaconForkChoiceStore, Error as ForkChoiceStoreError, PersistedForkChoiceStoreV17, + BeaconForkChoiceStore, Error as ForkChoiceStoreError, PersistedForkChoiceStore, PersistedForkChoiceStoreV28, }; pub use block_verification::{ diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 1ad325ebe1..786daa09da 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -49,16 +49,6 @@ pub static ENVELOPE_PROCESSING_DB_WRITE: LazyLock> = LazyLock: "Time spent writing a newly processed payload envelope and state to DB", ) }); -pub static ENVELOPE_PROCESSING_POST_EXEC_PROCESSING: LazyLock> = - LazyLock::new(|| { - try_create_histogram_with_buckets( - "payload_envelope_processing_post_exec_pre_attestable_seconds", - "Time between finishing execution processing and the payload envelope - becoming attestable", - linear_buckets(0.01, 0.01, 15), - ) - }); - /* * Block Processing */ diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs b/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs new file mode 100644 index 0000000000..47c58f07b9 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs @@ -0,0 +1,42 @@ +use std::sync::Arc; + +#[cfg(test)] +use mockall::automock; +use task_executor::TaskExecutor; +use types::{Hash256, SignedExecutionPayloadEnvelope, Slot}; + +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; + +/// An adapter to the `BeaconChain` functionalities to remove `BeaconChain` from direct dependency to enable testing envelope streamer logic. 
+pub(crate) struct EnvelopeStreamerBeaconAdapter { + chain: Arc>, +} + +#[cfg_attr(test, automock, allow(dead_code))] +impl EnvelopeStreamerBeaconAdapter { + pub(crate) fn new(chain: Arc>) -> Self { + Self { chain } + } + + pub(crate) fn executor(&self) -> &TaskExecutor { + &self.chain.task_executor + } + + pub(crate) fn get_payload_envelope( + &self, + root: &Hash256, + ) -> Result>, store::Error> { + self.chain.store.get_payload_envelope(root) + } + + pub(crate) fn get_split_slot(&self) -> Slot { + self.chain.store.get_split_info().slot + } + + pub(crate) fn block_has_canonical_payload( + &self, + root: &Hash256, + ) -> Result { + self.chain.canonical_head.block_has_canonical_payload(root) + } +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs new file mode 100644 index 0000000000..d10e3762a4 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs @@ -0,0 +1,219 @@ +mod beacon_chain_adapter; +#[cfg(test)] +mod tests; + +use std::sync::Arc; + +#[cfg_attr(test, double)] +use crate::payload_envelope_streamer::beacon_chain_adapter::EnvelopeStreamerBeaconAdapter; +use futures::Stream; +#[cfg(test)] +use mockall_double::double; +use tokio::sync::mpsc::{self, UnboundedSender}; +use tokio_stream::wrappers::UnboundedReceiverStream; +use tracing::{debug, error, warn}; +use types::{EthSpec, Hash256, SignedExecutionPayloadEnvelope}; + +#[cfg(not(test))] +use crate::BeaconChain; +use crate::{BeaconChainError, BeaconChainTypes}; + +type PayloadEnvelopeResult = + Result>>, BeaconChainError>; + +#[derive(Debug)] +pub enum Error { + BlockMissingFromForkChoice, +} + +#[derive(Debug, PartialEq)] +pub enum EnvelopeRequestSource { + ByRoot, + ByRange, +} + +pub struct PayloadEnvelopeStreamer { + adapter: EnvelopeStreamerBeaconAdapter, + request_source: EnvelopeRequestSource, +} + +// TODO(gloas) eventually we'll need to expand this to support loading blinded payload 
envelopes from the db +// and fetching the execution payload from the EL. See BlockStreamer impl as an example +impl PayloadEnvelopeStreamer { + pub(crate) fn new( + adapter: EnvelopeStreamerBeaconAdapter, + request_source: EnvelopeRequestSource, + ) -> Arc { + Arc::new(Self { + adapter, + request_source, + }) + } + + // TODO(gloas) simply a stub impl for now. Should check some exec payload envelope cache + // and return the envelope if it exists in the cache + fn check_payload_envelope_cache( + &self, + _beacon_block_root: &Hash256, + ) -> Option>> { + // if self.check_caches == CheckCaches::Yes + None + } + + fn load_envelope( + self: &Arc, + beacon_block_root: &Hash256, + ) -> Result>>, BeaconChainError> { + if let Some(cached_envelope) = self.check_payload_envelope_cache(beacon_block_root) { + Ok(Some(cached_envelope)) + } else { + // TODO(gloas) we'll want to use the execution layer directly to call + // the engine api method eth_getPayloadBodiesByRange() + match self.adapter.get_payload_envelope(beacon_block_root) { + Ok(opt_envelope) => Ok(opt_envelope.map(Arc::new)), + Err(e) => Err(BeaconChainError::DBError(e)), + } + } + } + + async fn load_envelopes( + self: &Arc, + block_roots: &[Hash256], + ) -> Result)>, BeaconChainError> { + let streamer = self.clone(); + let block_roots = block_roots.to_vec(); + let split_slot = streamer.adapter.get_split_slot(); + // Loading from the DB is slow -> spawn a blocking task + self.adapter + .executor() + .spawn_blocking_handle( + move || { + let mut results: Vec<(Hash256, PayloadEnvelopeResult)> = Vec::new(); + for root in block_roots.iter() { + // TODO(gloas) we are loading the full envelope from the db. + // in a future PR we will only be storing the blinded envelope. + // When that happens we'll need to use the EL here to fetch + // the payload and reconstruct the non-blinded envelope. 
+ let opt_envelope = match streamer.load_envelope(root) { + Ok(opt_envelope) => opt_envelope, + Err(e) => { + results.push((*root, Err(e))); + continue; + } + }; + + if streamer.request_source == EnvelopeRequestSource::ByRoot { + // No envelope verification required for `ENVELOPE_BY_ROOT` requests. + // If we only served envelopes that match our canonical view, nodes + // wouldn't be able to sync other branches. + results.push((*root, Ok(opt_envelope))); + continue; + } + + // When loading envelopes on or after the split slot, we must cross reference the bid from the child beacon block. + // There can be payloads that have been imported into the hot db but don't match our current view + // of the canonical chain. + + if let Some(envelope) = opt_envelope { + // Ensure that the envelopes we're serving match our view of the canonical chain. + + // When loading envelopes before the split slot, there is no need to check. + // Non-canonical payload envelopes will have already been pruned. + if split_slot > envelope.slot() { + results.push((*root, Ok(Some(envelope)))); + continue; + } + + match streamer.adapter.block_has_canonical_payload(root) { + Ok(is_envelope_canonical) => { + if is_envelope_canonical { + results.push((*root, Ok(Some(envelope)))); + } else { + results.push((*root, Ok(None))); + } + } + Err(_) => { + results.push(( + *root, + Err(BeaconChainError::EnvelopeStreamerError( + Error::BlockMissingFromForkChoice, + )), + )); + } + } + } else { + results.push((*root, Ok(None))); + } + } + results + }, + "load_execution_payload_envelopes", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? 
+ .await + .map_err(BeaconChainError::TokioJoin) + } + + async fn stream_payload_envelopes( + self: Arc, + beacon_block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + ) { + let results = match self.load_envelopes(&beacon_block_roots).await { + Ok(results) => results, + Err(e) => { + warn!(error = ?e, "Failed to load payload envelopes"); + send_errors(&beacon_block_roots, sender, e).await; + return; + } + }; + + for (root, result) in results { + if sender.send((root, Arc::new(result))).is_err() { + break; + } + } + } + + pub fn launch_stream( + self: Arc, + block_roots: Vec, + ) -> impl Stream>)> { + let (envelope_tx, envelope_rx) = mpsc::unbounded_channel(); + debug!( + envelopes = block_roots.len(), + "Launching a PayloadEnvelopeStreamer" + ); + let executor = self.adapter.executor().clone(); + executor.spawn( + self.stream_payload_envelopes(block_roots, envelope_tx), + "get_payload_envelopes_sender", + ); + UnboundedReceiverStream::new(envelope_rx) + } +} + +/// Create a `PayloadEnvelopeStreamer` from a `BeaconChain` and launch a stream. 
+#[cfg(not(test))] +pub fn launch_payload_envelope_stream( + chain: Arc>, + block_roots: Vec, + request_source: EnvelopeRequestSource, +) -> impl Stream>)> { + let adapter = beacon_chain_adapter::EnvelopeStreamerBeaconAdapter::new(chain); + PayloadEnvelopeStreamer::new(adapter, request_source).launch_stream(block_roots) +} + +async fn send_errors( + block_roots: &[Hash256], + sender: UnboundedSender<(Hash256, Arc>)>, + beacon_chain_error: BeaconChainError, +) { + let result = Arc::new(Err(beacon_chain_error)); + for beacon_block_root in block_roots { + if sender.send((*beacon_block_root, result.clone())).is_err() { + error!("EnvelopeStreamer channel closed unexpectedly"); + break; + } + } +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs new file mode 100644 index 0000000000..9e869a59b8 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs @@ -0,0 +1,386 @@ +use super::*; +use crate::payload_envelope_streamer::beacon_chain_adapter::MockEnvelopeStreamerBeaconAdapter; +use crate::test_utils::EphemeralHarnessType; +use bls::{FixedBytesExtended, Signature}; +use futures::StreamExt; +use std::collections::HashMap; +use task_executor::test_utils::TestRuntime; +use types::{ + ExecutionBlockHash, ExecutionPayloadEnvelope, ExecutionPayloadGloas, Hash256, MinimalEthSpec, + SignedExecutionPayloadEnvelope, Slot, +}; + +type E = MinimalEthSpec; +type T = EphemeralHarnessType; + +struct SlotEntry { + block_root: Hash256, + slot: Slot, + envelope: Option>, + non_canonical_envelope: bool, +} + +impl SlotEntry { + fn expect_envelope(&self, split_slot: Option) -> bool { + if self.envelope.is_none() { + return false; + } + if !self.non_canonical_envelope { + return true; + } + // Non-canonical envelopes before the split slot are returned + // (in production they would have been pruned). 
+ split_slot.is_some_and(|s| self.slot < s) + } +} + +fn roots(chain: &[SlotEntry]) -> Vec { + chain.iter().map(|s| s.block_root).collect() +} + +/// Build test chain data. +fn build_chain( + num_slots: u64, + skipped_slots: &[u64], + missing_envelope_slots: &[u64], + non_canonical_envelope_slots: &[u64], +) -> Vec { + let mut chain = Vec::new(); + for i in 1..=num_slots { + if skipped_slots.contains(&i) { + continue; + } + let slot = Slot::new(i); + let block_root = Hash256::from_low_u64_be(i); + let has_envelope = !missing_envelope_slots.contains(&i); + let is_non_canonical = non_canonical_envelope_slots.contains(&i); + + let envelope = if has_envelope { + let block_hash = if is_non_canonical { + ExecutionBlockHash::from_root(Hash256::repeat_byte(0xFF)) + } else { + ExecutionBlockHash::from_root(Hash256::from_low_u64_be(i)) + }; + Some(SignedExecutionPayloadEnvelope { + message: ExecutionPayloadEnvelope { + payload: ExecutionPayloadGloas { + block_hash, + ..Default::default() + }, + execution_requests: Default::default(), + builder_index: 0, + beacon_block_root: block_root, + slot, + state_root: Hash256::zero(), + }, + signature: Signature::empty(), + }) + } else { + None + }; + + chain.push(SlotEntry { + block_root, + slot, + envelope, + non_canonical_envelope: is_non_canonical, + }); + } + chain +} + +fn mock_adapter() -> (MockEnvelopeStreamerBeaconAdapter, TestRuntime) { + let runtime = TestRuntime::default(); + let mut mock = MockEnvelopeStreamerBeaconAdapter::default(); + mock.expect_executor() + .return_const(runtime.task_executor.clone()); + (mock, runtime) +} + +/// Configure `get_payload_envelope` to return envelopes from chain data. 
+fn mock_envelopes(mock: &mut MockEnvelopeStreamerBeaconAdapter, chain: &[SlotEntry]) { + let envelope_map: HashMap>> = chain + .iter() + .map(|entry| (entry.block_root, entry.envelope.clone())) + .collect(); + mock.expect_get_payload_envelope() + .returning(move |root| Ok(envelope_map.get(root).cloned().flatten())); +} + +/// Configure `block_has_canonical_payload` based on chain's non-canonical entries. +fn mock_canonical_head(mock: &mut MockEnvelopeStreamerBeaconAdapter, chain: &[SlotEntry]) { + let non_canonical: Vec = chain + .iter() + .filter(|e| e.non_canonical_envelope) + .map(|e| e.block_root) + .collect(); + mock.expect_block_has_canonical_payload() + .returning(move |root| Ok(!non_canonical.contains(root))); +} + +fn unwrap_result( + result: &Arc>, +) -> &Option>> { + result + .as_ref() + .as_ref() + .expect("unexpected error in stream result") +} + +async fn assert_stream_matches( + stream: &mut (impl Stream>)> + Unpin), + chain: &[SlotEntry], + split_slot: Option, +) { + for (i, entry) in chain.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, entry.block_root, "root mismatch at index {i}"); + + let result = unwrap_result(&result); + + if entry.expect_envelope(split_slot) { + let envelope = result + .as_ref() + .unwrap_or_else(|| panic!("expected Some at index {i} but got None")); + let expected_envelope = entry.envelope.as_ref().unwrap(); + assert_eq!( + envelope.block_hash(), + expected_envelope.block_hash(), + "block_hash mismatch at index {i}" + ); + } else { + assert!( + result.is_none(), + "expected None at index {i} (missing or non-canonical), got Some" + ); + } + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// Happy path: all envelopes exist and are canonical. 
+#[tokio::test] +async fn stream_envelopes_by_range() { + let chain = build_chain(8, &[], &[], &[]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock_canonical_head(&mut mock, &chain); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(roots(&chain)); + assert_stream_matches(&mut stream, &chain, None).await; +} + +/// Mixed chain: skipped slots, missing envelopes, and non-canonical envelopes. +#[tokio::test] +async fn stream_envelopes_by_range_mixed() { + let chain = build_chain(12, &[3, 8], &[5], &[7, 11]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock_canonical_head(&mut mock, &chain); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(roots(&chain)); + assert_stream_matches(&mut stream, &chain, None).await; +} + +/// Non-canonical envelopes before the split slot bypass canonical verification +/// and are returned. Non-canonical envelopes after the split slot are filtered out. +#[tokio::test] +async fn stream_envelopes_by_range_before_split() { + // Non-canonical envelopes at slots 2 and 4 (before split), slot 8 (after split). 
+ let chain = build_chain(10, &[], &[], &[2, 4, 8]); + let split_slot = Slot::new(6); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(split_slot); + mock_envelopes(&mut mock, &chain); + mock_canonical_head(&mut mock, &chain); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(roots(&chain)); + assert_stream_matches(&mut stream, &chain, Some(split_slot)).await; +} + +#[tokio::test] +async fn stream_envelopes_empty_roots() { + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(vec![]); + assert!( + stream.next().await.is_none(), + "empty roots should produce no results" + ); +} + +#[tokio::test] +async fn stream_envelopes_single_root() { + let chain = build_chain(3, &[], &[], &[]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock_canonical_head(&mut mock, &chain); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(vec![chain[1].block_root]); + + let (root, result) = stream.next().await.expect("should get one result"); + assert_eq!(root, chain[1].block_root); + let envelope = unwrap_result(&result) + .as_ref() + .expect("should have envelope"); + assert_eq!( + envelope.block_hash(), + chain[1].envelope.as_ref().unwrap().block_hash(), + ); + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// ByRoot requests skip canonical verification, so non-canonical envelopes +/// should still be returned. `block_has_canonical_payload` should never be called. 
+#[tokio::test] +async fn stream_envelopes_by_root() { + let chain = build_chain(8, &[], &[], &[3, 5, 7]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock.expect_block_has_canonical_payload().times(0); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRoot); + let mut stream = streamer.launch_stream(roots(&chain)); + + // Every envelope should come back as Some, even the non-canonical ones. + for (i, entry) in chain.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, entry.block_root, "root mismatch at index {i}"); + + let envelope = unwrap_result(&result) + .as_ref() + .unwrap_or_else(|| panic!("expected Some at index {i} for ByRoot request")); + let expected_envelope = entry.envelope.as_ref().unwrap(); + assert_eq!( + envelope.block_hash(), + expected_envelope.block_hash(), + "block_hash mismatch at index {i}" + ); + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// When `block_has_canonical_payload` returns an error, the streamer should +/// yield `Err(EnvelopeStreamerError(BlockMissingFromForkChoice))` for those roots. 
+#[tokio::test] +async fn stream_envelopes_error() { + let chain = build_chain(4, &[], &[], &[]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock.expect_block_has_canonical_payload() + .returning(|_| Err(BeaconChainError::CanonicalHeadLockTimeout)); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(roots(&chain)); + + for (i, entry) in chain.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, entry.block_root, "root mismatch at index {i}"); + assert!( + matches!( + result.as_ref(), + Err(BeaconChainError::EnvelopeStreamerError( + Error::BlockMissingFromForkChoice + )) + ), + "expected BlockMissingFromForkChoice error at index {i}, got {:?}", + result + ); + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// Requesting unknown roots (not in the store) via ByRange should return Ok(None). 
+#[tokio::test] +async fn stream_envelopes_by_range_unknown_roots() { + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock.expect_get_payload_envelope().returning(|_| Ok(None)); + + let unknown_roots: Vec = (1..=4) + .map(|i| Hash256::from_low_u64_be(i * 1000)) + .collect(); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(unknown_roots.clone()); + + for (i, expected_root) in unknown_roots.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, *expected_root, "root mismatch at index {i}"); + let envelope = unwrap_result(&result); + assert!( + envelope.is_none(), + "expected None for unknown root at index {i}" + ); + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// Requesting roots via ByRoot where some envelopes are missing should +/// return Ok(None) for those roots. 
+#[tokio::test] +async fn stream_envelopes_by_root_missing_envelopes() { + let chain = build_chain(6, &[], &[2, 4], &[]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock.expect_block_has_canonical_payload().times(0); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRoot); + let mut stream = streamer.launch_stream(roots(&chain)); + + for (i, entry) in chain.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, entry.block_root, "root mismatch at index {i}"); + + let envelope_opt = unwrap_result(&result); + if let Some(entry_envelope) = &entry.envelope { + let envelope = envelope_opt + .as_ref() + .unwrap_or_else(|| panic!("expected Some at index {i}")); + assert_eq!( + envelope.block_hash(), + entry_envelope.block_hash(), + "block_hash mismatch at index {i}" + ); + } else { + assert!( + envelope_opt.is_none(), + "expected None for missing envelope at index {i}" + ); + } + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs index 4a2b152703..80c9c68c8d 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs @@ -12,8 +12,8 @@ use crate::{ PayloadVerificationOutcome, block_verification::PayloadVerificationHandle, payload_envelope_verification::{ - EnvelopeError, EnvelopeImportData, MaybeAvailableEnvelope, - gossip_verified_envelope::GossipVerifiedEnvelope, load_snapshot, + AvailableEnvelope, EnvelopeError, EnvelopeImportData, MaybeAvailableEnvelope, + 
gossip_verified_envelope::GossipVerifiedEnvelope, load_snapshot_from_state_root, payload_notifier::PayloadNotifier, }, }; @@ -32,11 +32,11 @@ impl GossipVerifiedEnvelope { ) -> Result, EnvelopeError> { let signed_envelope = self.signed_envelope; let envelope = &signed_envelope.message; - let payload = &envelope.payload; - // TODO(gloas) - - // Verify the execution payload is valid + // Define a future that will verify the execution payload with an execution engine. + // + // We do this as early as possible so that later parts of this function can run in parallel + // with the payload verification. let payload_notifier = PayloadNotifier::new( chain.clone(), signed_envelope.clone(), @@ -74,11 +74,7 @@ impl GossipVerifiedEnvelope { let snapshot = if let Some(snapshot) = self.snapshot { *snapshot } else { - load_snapshot( - signed_envelope.as_ref(), - &chain.canonical_head, - &chain.store, - )? + load_snapshot_from_state_root::(block_root, self.block.state_root(), &chain.store)? }; let mut state = snapshot.pre_state; @@ -94,13 +90,15 @@ impl GossipVerifiedEnvelope { )?; Ok(ExecutionPendingEnvelope { - signed_envelope: MaybeAvailableEnvelope::AvailabilityPending { - block_hash: payload.block_hash, - envelope: signed_envelope, - }, + signed_envelope: MaybeAvailableEnvelope::Available(AvailableEnvelope { + execution_block_hash: signed_envelope.block_hash(), + envelope: signed_envelope.clone(), + columns: vec![], + columns_available_timestamp: None, + spec: chain.spec.clone(), + }), import_data: EnvelopeImportData { block_root, - block: self.block, post_state: Box::new(state), }, payload_verification_handle, diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 7b33d519e5..03a3a91ac5 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ 
b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -31,11 +31,7 @@ pub struct GossipVerificationContext<'a, T: BeaconChainTypes> { } /// Verify that an execution payload envelope is consistent with its beacon block -/// and execution bid. This checks: -/// - The envelope slot is not prior to finalization -/// - The envelope slot matches the block slot -/// - The builder index matches the committed bid -/// - The payload block hash matches the committed bid +/// and execution bid. pub(crate) fn verify_envelope_consistency( envelope: &ExecutionPayloadEnvelope, block: &SignedBeaconBlock, @@ -51,7 +47,7 @@ pub(crate) fn verify_envelope_consistency( }); } - // Check that the slot of the envelope matches the slot of the parent block. + // Check that the slot of the envelope matches the slot of the block. if envelope.slot != block.slot() { return Err(EnvelopeError::SlotMismatch { block: block.slot(), @@ -147,18 +143,20 @@ impl GossipVerifiedEnvelope { // For external builder envelopes, we must load the state to access the builder registry. let builder_index = envelope.builder_index; let block_slot = envelope.slot; - let block_epoch = block_slot.epoch(T::EthSpec::slots_per_epoch()); - let proposer_shuffling_decision_block = - proto_block.proposer_shuffling_root_for_child_block(block_epoch, ctx.spec); + let envelope_epoch = block_slot.epoch(T::EthSpec::slots_per_epoch()); + // Since the payload's block is already guaranteed to be imported, the associated `proto_block.current_epoch_shuffling_id` + // already carries the correct `shuffling_decision_block`. + let proposer_shuffling_decision_block = proto_block + .current_epoch_shuffling_id + .shuffling_decision_block; let (signature_is_valid, opt_snapshot) = if builder_index == BUILDER_INDEX_SELF_BUILD { // Fast path: self-built envelopes can be verified without loading the state. 
let mut opt_snapshot = None; let proposer = beacon_proposer_cache::with_proposer_cache( ctx.beacon_proposer_cache, - ctx.spec, proposer_shuffling_decision_block, - block_epoch, + envelope_epoch, |proposers| proposers.get_slot::(block_slot), || { debug!( @@ -173,13 +171,14 @@ impl GossipVerifiedEnvelope { opt_snapshot = Some(Box::new(snapshot.clone())); Ok::<_, EnvelopeError>((snapshot.state_root, snapshot.pre_state)) }, + ctx.spec, )?; let expected_proposer = proposer.index; let fork = proposer.fork; if block.message().proposer_index() != expected_proposer as u64 { return Err(EnvelopeError::IncorrectBlockProposer { - block: block.message().proposer_index(), + proposer_index: block.message().proposer_index(), local_shuffling: expected_proposer as u64, }); } @@ -188,7 +187,7 @@ impl GossipVerifiedEnvelope { let pubkey = pubkey_cache .get(block.message().proposer_index() as usize) .ok_or_else(|| EnvelopeError::UnknownValidator { - builder_index: block.message().proposer_index(), + proposer_index: block.message().proposer_index(), })?; let is_valid = signed_envelope.verify_signature( pubkey, diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 06818209a4..c406844d3e 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -1,12 +1,12 @@ use std::sync::Arc; use std::time::Duration; +use eth2::types::{EventKind, SseExecutionPayloadAvailable}; use fork_choice::PayloadVerificationStatus; -use logging::crit; use slot_clock::SlotClock; use store::StoreOp; use tracing::{debug, error, info, info_span, instrument, warn}; -use types::{BeaconState, BlockImportSource, Hash256, SignedBeaconBlock, Slot}; +use types::{BeaconState, BlockImportSource, Hash256, Slot}; use super::{ AvailableEnvelope, AvailableExecutedEnvelope, EnvelopeError, EnvelopeImportData, @@ -15,15 +15,16 @@ 
use super::{ use crate::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, NotifyExecutionLayer, - block_verification_types::{AsBlock, AvailableBlockData}, + block_verification_types::AvailableBlockData, metrics, - payload_envelope_verification::{ExecutionPendingEnvelope, MaybeAvailableEnvelope}, + payload_envelope_verification::ExecutionPendingEnvelope, validator_monitor::{get_slot_delay_ms, timestamp_now}, }; -use eth2::types::{EventKind, SseExecutionPayloadAvailable}; + +const ENVELOPE_METRICS_CACHE_SLOT_LIMIT: u32 = 64; impl BeaconChain { - /// Returns `Ok(block_root)` if the given `unverified_envelope` was successfully verified and + /// Returns `Ok(status)` if the given `unverified_envelope` was successfully verified and /// imported into the chain. /// /// ## Errors @@ -76,6 +77,8 @@ impl BeaconChain { let envelope_times_cache = chain.envelope_times_cache.clone(); let slot_clock = chain.slot_clock.clone(); + // TODO(gloas): rename/refactor these `into_` names to be less similar and more clear + // about what the function actually does. let executed_envelope = chain .into_executed_payload_envelope(execution_pending) .await @@ -107,7 +110,7 @@ impl BeaconChain { // Verify and import the payload envelope. match import_envelope.await { - // The payload envelope was successfully verified and imported. Yay. + // The payload envelope was successfully verified and imported. Ok(status @ AvailabilityProcessingStatus::Imported(block_root)) => { info!( ?block_root, @@ -116,6 +119,11 @@ impl BeaconChain { "Execution payload envelope imported" ); + // TODO(gloas) do we need to send a `PayloadImported` event to the reprocess queue? + // TODO(gloas) do we need to recompute head? + // should canonical_head return the block and the payload now? 
+ self.recompute_head_at_current_slot().await; + metrics::inc_counter(&metrics::ENVELOPE_PROCESSING_SUCCESSES); Ok(status) @@ -126,25 +134,13 @@ impl BeaconChain { Ok(status) } Err(EnvelopeError::BeaconChainError(e)) => { - match e.as_ref() { - BeaconChainError::TokioJoin(e) => { - debug!( - error = ?e, - "Envelope processing cancelled" - ); - } - _ => { - // There was an error whilst attempting to verify and import the payload envelope. It might - // be partially verified or partially imported. - crit!( - error = ?e, - "Envelope processing error" - ); - } - }; + if matches!(e.as_ref(), BeaconChainError::TokioJoin(_)) { + debug!(error = ?e, "Envelope processing cancelled"); + } else { + warn!(error = ?e, "Execution payload envelope rejected"); + } Err(EnvelopeError::BeaconChainError(e)) } - // The payload envelope failed verification. Err(other) => { warn!( reason = other.to_string(), @@ -175,22 +171,6 @@ impl BeaconChain { .map_err(BeaconChainError::TokioJoin)? .ok_or(BeaconChainError::RuntimeShutdown)??; - // TODO(gloas): implement data column availability checking. - // For now, treat all envelopes as available after EL verification with empty columns. 
- let signed_envelope = match signed_envelope { - available @ MaybeAvailableEnvelope::Available(_) => available, - MaybeAvailableEnvelope::AvailabilityPending { - block_hash, - envelope, - } => MaybeAvailableEnvelope::Available(AvailableEnvelope::new( - block_hash, - envelope, - vec![], - None, - self.spec.clone(), - )), - }; - Ok(ExecutedEnvelope::new( signed_envelope, import_data, @@ -211,7 +191,6 @@ impl BeaconChain { let EnvelopeImportData { block_root, - block, post_state, } = import_data; @@ -228,7 +207,6 @@ impl BeaconChain { block_root, *post_state, payload_verification_outcome.payload_verification_status, - block, ) }, "payload_verification_handle", @@ -252,20 +230,8 @@ impl BeaconChain { block_root: Hash256, state: BeaconState, _payload_verification_status: PayloadVerificationStatus, - parent_block: Arc>, ) -> Result { // Everything in this initial section is on the hot path for processing the envelope. - - let post_exec_timer = - metrics::start_timer(&metrics::ENVELOPE_PROCESSING_POST_EXEC_PROCESSING); - - // Check the payloads parent block against weak subjectivity checkpoint. - self.check_block_against_weak_subjectivity_checkpoint( - parent_block.message(), - block_root, - &state, - )?; - // Take an upgradable read lock on fork choice so we can check if this block has already // been imported. We don't want to repeat work importing a block that is already imported. let fork_choice_reader = self.canonical_head.fork_choice_upgradable_read_lock(); @@ -288,7 +254,6 @@ impl BeaconChain { // TODO(gloas) Do we want to use an early attester cache like mechanism for payload enevelopes? 
// TODO(gloas) emit SSE event if the payload became the new head payload - drop(post_exec_timer); // It is important NOT to return errors here before the database commit, because the envelope // has already been added to fork choice and the database would be left in an inconsistent @@ -304,26 +269,12 @@ impl BeaconChain { let mut ops = vec![]; - match self.get_blobs_or_columns_store_op( + if let Some(blobs_or_columns_store_op) = self.get_blobs_or_columns_store_op( block_root, signed_envelope.slot(), AvailableBlockData::DataColumns(columns), ) { - Ok(Some(blobs_or_columns_store_op)) => { - ops.push(blobs_or_columns_store_op); - } - Ok(None) => {} - Err(e) => { - error!( - msg = "Restoring fork choice from disk", - error = &e, - ?block_root, - "Failed to store data columns into the database" - ); - // TODO(gloas) implement failed write handling to fork choice - // let _ = self.handle_import_block_db_write_error(fork_choice); - return Err(EnvelopeError::InternalError(e)); - } + ops.push(blobs_or_columns_store_op); } let db_write_timer = metrics::start_timer(&metrics::ENVELOPE_PROCESSING_DB_WRITE); @@ -389,7 +340,12 @@ impl BeaconChain { // Do not write to the cache for envelopes older than 2 epochs, this helps reduce writes // to the cache during sync. - if envelope_delay_total < self.slot_clock.slot_duration().saturating_mul(64) { + if envelope_delay_total + < self + .slot_clock + .slot_duration() + .saturating_mul(ENVELOPE_METRICS_CACHE_SLOT_LIMIT) + { self.envelope_times_cache.write().set_time_imported( block_root, envelope_slot, diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index 12019a436d..8ca6871dda 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -1,5 +1,5 @@ //! 
The incremental processing steps (e.g., signatures verified but not the state transition) is -//! represented as a sequence of wrapper-types around the block. There is a linear progression of +//! represented as a sequence of wrapper-types around the envelope. There is a linear progression of //! types, starting at a `SignedExecutionPayloadEnvelope` and finishing with an `AvailableExecutedEnvelope` (see //! diagram below). //! @@ -26,12 +26,12 @@ use state_processing::{BlockProcessingError, envelope_processing::EnvelopeProces use tracing::instrument; use types::{ BeaconState, BeaconStateError, ChainSpec, DataColumnSidecarList, EthSpec, ExecutionBlockHash, - ExecutionPayloadEnvelope, Hash256, SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, + ExecutionPayloadEnvelope, Hash256, SignedExecutionPayloadEnvelope, Slot, }; use crate::{ BeaconChainError, BeaconChainTypes, BeaconStore, BlockError, ExecutionPayloadError, - PayloadVerificationOutcome, canonical_head::CanonicalHead, + PayloadVerificationOutcome, }; pub mod execution_pending_envelope; @@ -44,7 +44,6 @@ pub use execution_pending_envelope::ExecutionPendingEnvelope; #[derive(PartialEq)] pub struct EnvelopeImportData { pub block_root: Hash256, - pub block: Arc>, pub post_state: Box>, } @@ -116,7 +115,7 @@ pub struct EnvelopeProcessingSnapshot { /// /// /// It contains 2 variants: -/// 1. `Available`: This enelope has been executed and also contains all data to consider it +/// 1. `Available`: This envelope has been executed and also contains all data to consider it /// fully available. /// 2. `AvailabilityPending`: This envelope hasn't received all required blobs to consider it /// fully available. 
@@ -182,14 +181,17 @@ pub enum EnvelopeError { /// The envelope slot doesn't match the block SlotMismatch { block: Slot, envelope: Slot }, /// The validator index is unknown - UnknownValidator { builder_index: u64 }, + UnknownValidator { proposer_index: u64 }, /// The block hash doesn't match the committed bid BlockHashMismatch { committed_bid: ExecutionBlockHash, envelope: ExecutionBlockHash, }, /// The block's proposer_index does not match the locally computed proposer - IncorrectBlockProposer { block: u64, local_shuffling: u64 }, + IncorrectBlockProposer { + proposer_index: u64, + local_shuffling: u64, + }, /// The slot belongs to a block that is from a slot prior than /// to most recently finalized slot PriorToFinalization { @@ -269,7 +271,6 @@ impl From for EnvelopeError { } } -#[allow(clippy::type_complexity)] #[instrument(skip_all, level = "debug", fields(beacon_block_root = %beacon_block_root))] /// Load state from store given a known state root and block root. /// Use this when the proto block has already been looked up from fork choice. @@ -298,32 +299,3 @@ pub(crate) fn load_snapshot_from_state_root( beacon_block_root, }) } - -#[instrument(skip_all, level = "debug", fields(beacon_block_root = %envelope.beacon_block_root()))] -pub(crate) fn load_snapshot( - envelope: &SignedExecutionPayloadEnvelope, - canonical_head: &CanonicalHead, - store: &BeaconStore, -) -> Result, EnvelopeError> { - // Reject any envelope if its block is not known to fork choice. - // - // A block that is not in fork choice is either: - // - // - Not yet imported: we should reject this envelope because we should only import it after - // its parent block has been fully imported. - // - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore the - // envelope because it will revert finalization. Note that the finalized block is stored in - // fork choice, so we will not reject any child of the finalized block (this is relevant - // during genesis). 
- - let fork_choice_read_lock = canonical_head.fork_choice_read_lock(); - let beacon_block_root = envelope.beacon_block_root(); - let Some(proto_beacon_block) = fork_choice_read_lock.get_block(&beacon_block_root) else { - return Err(EnvelopeError::BlockRootUnknown { - block_root: beacon_block_root, - }); - }; - drop(fork_choice_read_lock); - - load_snapshot_from_state_root::(beacon_block_root, proto_beacon_block.state_root, store) -} diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs index f3e4f6990b..df21d33493 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs @@ -29,10 +29,10 @@ impl PayloadNotifier { let payload_verification_status = { let payload_message = &envelope.message; - // TODO(gloas) re-asses if optimistic syncing works similarly post-gloas match notify_execution_layer { NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => { let new_payload_request = Self::build_new_payload_request(&envelope, &block)?; + // TODO(gloas): check and test RLP block hash calculation post-Gloas if let Err(e) = new_payload_request.perform_optimistic_sync_verifications() { warn!( block_number = ?payload_message.payload.block_number, @@ -61,17 +61,9 @@ impl PayloadNotifier { if let Some(precomputed_status) = self.payload_verification_status { Ok(precomputed_status) } else { - let block_root = self.envelope.message.beacon_block_root; let parent_root = self.block.message().parent_root(); let request = Self::build_new_payload_request(&self.envelope, &self.block)?; - notify_new_payload( - &self.chain, - block_root, - self.envelope.slot(), - parent_root, - request, - ) - .await + notify_new_payload(&self.chain, self.envelope.slot(), parent_root, request).await } } diff --git 
a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index d8fcc0901b..6229544e81 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,52 +1,19 @@ -use crate::{ - beacon_fork_choice_store::{PersistedForkChoiceStoreV17, PersistedForkChoiceStoreV28}, - metrics, -}; +use crate::{beacon_fork_choice_store::PersistedForkChoiceStoreV28, metrics}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use store::{DBColumn, Error, KeyValueStoreOp, StoreConfig, StoreItem}; +use store::{DBColumn, Error, KeyValueStoreOp, StoreConfig}; use superstruct::superstruct; use types::Hash256; // If adding a new version you should update this type alias and fix the breakages. pub type PersistedForkChoice = PersistedForkChoiceV28; -#[superstruct( - variants(V17, V28), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V28), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoice { - #[superstruct(only(V17))] - pub fork_choice_v17: fork_choice::PersistedForkChoiceV17, - #[superstruct(only(V28))] pub fork_choice: fork_choice::PersistedForkChoiceV28, - #[superstruct(only(V17))] - pub fork_choice_store_v17: PersistedForkChoiceStoreV17, - #[superstruct(only(V28))] pub fork_choice_store: PersistedForkChoiceStoreV28, } -macro_rules! 
impl_store_item { - ($type:ty) => { - impl StoreItem for $type { - fn db_column() -> DBColumn { - DBColumn::ForkChoice - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> std::result::Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } - } - }; -} - -impl_store_item!(PersistedForkChoiceV17); - impl PersistedForkChoiceV28 { pub fn from_bytes(bytes: &[u8], store_config: &StoreConfig) -> Result { let decompressed_bytes = store_config diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index ddc5978339..ed82143c38 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,11 +1,4 @@ //! Utilities for managing database schema changes. -mod migration_schema_v23; -mod migration_schema_v24; -mod migration_schema_v25; -mod migration_schema_v26; -mod migration_schema_v27; -mod migration_schema_v28; - use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; use store::Error as StoreError; @@ -13,81 +6,17 @@ use store::hot_cold_store::{HotColdDB, HotColdDBError}; use store::metadata::{CURRENT_SCHEMA_VERSION, SchemaVersion}; /// Migrate the database from one schema version to another, applying all requisite mutations. +/// +/// All migrations for schema versions up to and including v28 have been removed. Nodes on live +/// networks are already running v28, so only the current version check remains. pub fn migrate_schema( - db: Arc>, + _db: Arc>, from: SchemaVersion, to: SchemaVersion, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to itself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), - // Upgrade across multiple versions by recursively migrating one step at a time. 
- (_, _) if from.as_u64() + 1 < to.as_u64() => { - let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), from, next)?; - migrate_schema::(db, next, to) - } - // Downgrade across multiple versions by recursively migrating one step at a time. - (_, _) if to.as_u64() + 1 < from.as_u64() => { - let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::(db.clone(), from, next)?; - migrate_schema::(db, next, to) - } - - // - // Migrations from before SchemaVersion(22) are deprecated. - // - (SchemaVersion(22), SchemaVersion(23)) => { - let ops = migration_schema_v23::upgrade_to_v23::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(23), SchemaVersion(22)) => { - let ops = migration_schema_v23::downgrade_from_v23::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(23), SchemaVersion(24)) => { - let ops = migration_schema_v24::upgrade_to_v24::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(24), SchemaVersion(23)) => { - let ops = migration_schema_v24::downgrade_from_v24::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(24), SchemaVersion(25)) => { - let ops = migration_schema_v25::upgrade_to_v25()?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(25), SchemaVersion(24)) => { - let ops = migration_schema_v25::downgrade_from_v25()?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(25), SchemaVersion(26)) => { - let ops = migration_schema_v26::upgrade_to_v26::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(26), SchemaVersion(25)) => { - let ops = migration_schema_v26::downgrade_from_v26::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(26), SchemaVersion(27)) => { - // This migration updates the blobs db. The schema version - // is bumped inside upgrade_to_v27. 
- migration_schema_v27::upgrade_to_v27::(db.clone()) - } - (SchemaVersion(27), SchemaVersion(26)) => { - // Downgrading is essentially a no-op and is only possible - // if peer das isn't scheduled. - migration_schema_v27::downgrade_from_v27::(db.clone())?; - db.store_schema_version_atomically(to, vec![]) - } - (SchemaVersion(27), SchemaVersion(28)) => { - let ops = migration_schema_v28::upgrade_to_v28::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(28), SchemaVersion(27)) => { - let ops = migration_schema_v28::downgrade_from_v28::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs deleted file mode 100644 index e238e1efb6..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs +++ /dev/null @@ -1,180 +0,0 @@ -use crate::BeaconForkChoiceStore; -use crate::beacon_chain::BeaconChainTypes; -use crate::persisted_fork_choice::PersistedForkChoiceV17; -use crate::schema_change::StoreError; -use crate::test_utils::{BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY, PersistedBeaconChain}; -use fork_choice::{ForkChoice, ResetPayloadStatuses}; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::sync::Arc; -use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; -use tracing::{debug, info}; -use types::{Hash256, Slot}; - -/// Dummy value to use for the canonical head block root, see below. -pub const DUMMY_CANONICAL_HEAD_BLOCK_ROOT: Hash256 = Hash256::repeat_byte(0xff); - -pub fn upgrade_to_v23( - db: Arc>, -) -> Result, Error> { - info!("Upgrading DB schema from v22 to v23"); - - // 1) Set the head-tracker to empty - let Some(persisted_beacon_chain_v22) = - db.get_item::(&BEACON_CHAIN_DB_KEY)? 
- else { - return Err(Error::MigrationError( - "No persisted beacon chain found in DB. Datadir could be incorrect or DB could be corrupt".to_string() - )); - }; - - let persisted_beacon_chain = PersistedBeaconChain { - genesis_block_root: persisted_beacon_chain_v22.genesis_block_root, - }; - - let mut ops = vec![persisted_beacon_chain.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; - - // 2) Wipe out all state temporary flags. While un-used in V23, if there's a rollback we could - // end-up with an inconsistent DB. - for state_root_result in db - .hot_db - .iter_column_keys::(DBColumn::BeaconStateTemporary) - { - let state_root = state_root_result?; - debug!( - ?state_root, - "Deleting temporary state on v23 schema migration" - ); - ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateTemporary, - state_root.as_slice().to_vec(), - )); - - // We also delete the temporary states themselves. Although there are known issue with - // temporary states and this could lead to DB corruption, we will only corrupt the DB in - // cases where the DB would be corrupted by restarting on v7.0.x. We consider these DBs - // "too far gone". Deleting here has the advantage of not generating warnings about - // disjoint state DAGs in the v24 upgrade, or the first pruning after migration. - ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconState, - state_root.as_slice().to_vec(), - )); - ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateSummary, - state_root.as_slice().to_vec(), - )); - } - - Ok(ops) -} - -pub fn downgrade_from_v23( - db: Arc>, -) -> Result, Error> { - let Some(persisted_beacon_chain) = db.get_item::(&BEACON_CHAIN_DB_KEY)? - else { - // The `PersistedBeaconChain` must exist if fork choice exists. - return Err(Error::MigrationError( - "No persisted beacon chain found in DB. Datadir could be incorrect or DB could be corrupt".to_string(), - )); - }; - - // Recreate head-tracker from fork choice. 
- let Some(persisted_fork_choice) = db.get_item::(&FORK_CHOICE_DB_KEY)? - else { - // Fork choice should exist if the database exists. - return Err(Error::MigrationError( - "No fork choice found in DB".to_string(), - )); - }; - - // We use dummy roots for the justified states because we can source the balances from the v17 - // persited fork choice. The justified state root isn't required to look up the justified state's - // balances (as it would be in V28). This fork choice object with corrupt state roots SHOULD NOT - // be written to disk. - let dummy_justified_state_root = Hash256::repeat_byte(0x66); - let dummy_unrealized_justified_state_root = Hash256::repeat_byte(0x77); - - let fc_store = BeaconForkChoiceStore::from_persisted_v17( - persisted_fork_choice.fork_choice_store_v17, - dummy_justified_state_root, - dummy_unrealized_justified_state_root, - db.clone(), - ) - .map_err(|e| { - Error::MigrationError(format!( - "Error loading fork choice store from persisted: {e:?}" - )) - })?; - - // Doesn't matter what policy we use for invalid payloads, as our head calculation just - // considers descent from finalization. 
- let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let fork_choice = ForkChoice::from_persisted( - persisted_fork_choice.fork_choice_v17.try_into()?, - reset_payload_statuses, - fc_store, - &db.spec, - ) - .map_err(|e| { - Error::MigrationError(format!("Error loading fork choice from persisted: {e:?}")) - })?; - - let heads = fork_choice - .proto_array() - .heads_descended_from_finalization::(fork_choice.finalized_checkpoint()); - - let head_roots = heads.iter().map(|node| node.root).collect(); - let head_slots = heads.iter().map(|node| node.slot).collect(); - - let persisted_beacon_chain_v22 = PersistedBeaconChainV22 { - _canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT, - genesis_block_root: persisted_beacon_chain.genesis_block_root, - ssz_head_tracker: SszHeadTracker { - roots: head_roots, - slots: head_slots, - }, - }; - - let ops = vec![persisted_beacon_chain_v22.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; - - Ok(ops) -} - -/// Helper struct that is used to encode/decode the state of the `HeadTracker` as SSZ bytes. -/// -/// This is used when persisting the state of the `BeaconChain` to disk. 
-#[derive(Encode, Decode, Clone)] -pub struct SszHeadTracker { - roots: Vec, - slots: Vec, -} - -#[derive(Clone, Encode, Decode)] -pub struct PersistedBeaconChainV22 { - /// This value is ignored to resolve the issue described here: - /// - /// https://github.com/sigp/lighthouse/pull/1639 - /// - /// Its removal is tracked here: - /// - /// https://github.com/sigp/lighthouse/issues/1784 - pub _canonical_head_block_root: Hash256, - pub genesis_block_root: Hash256, - /// DEPRECATED - pub ssz_head_tracker: SszHeadTracker, -} - -impl StoreItem for PersistedBeaconChainV22 { - fn db_column() -> DBColumn { - DBColumn::BeaconChain - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs deleted file mode 100644 index 44e8894d6f..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs +++ /dev/null @@ -1,20 +0,0 @@ -use store::{DBColumn, Error, KeyValueStoreOp}; -use tracing::info; -use types::Hash256; - -pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::ZERO; - -/// Delete the on-disk eth1 data. -pub fn upgrade_to_v25() -> Result, Error> { - info!("Deleting eth1 data from disk for v25 DB upgrade"); - Ok(vec![KeyValueStoreOp::DeleteKey( - DBColumn::Eth1Cache, - ETH1_CACHE_DB_KEY.as_slice().to_vec(), - )]) -} - -/// No-op: we don't need to recreate on-disk eth1 data, as previous versions gracefully handle -/// data missing from disk. 
-pub fn downgrade_from_v25() -> Result, Error> { - Ok(vec![]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs deleted file mode 100644 index 38714ea060..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs +++ /dev/null @@ -1,91 +0,0 @@ -use crate::BeaconChainTypes; -use crate::custody_context::CustodyContextSsz; -use crate::persisted_custody::{CUSTODY_DB_KEY, PersistedCustody}; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::sync::Arc; -use store::{DBColumn, Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use tracing::info; - -#[derive(Debug, Encode, Decode, Clone)] -pub(crate) struct CustodyContextSszV24 { - pub(crate) validator_custody_at_head: u64, - pub(crate) persisted_is_supernode: bool, -} - -pub(crate) struct PersistedCustodyV24(CustodyContextSszV24); - -impl StoreItem for PersistedCustodyV24 { - fn db_column() -> DBColumn { - DBColumn::CustodyContext - } - - fn as_store_bytes(&self) -> Vec { - self.0.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - let custody_context = CustodyContextSszV24::from_ssz_bytes(bytes)?; - Ok(PersistedCustodyV24(custody_context)) - } -} - -/// Upgrade the `CustodyContext` entry to v26. 
-pub fn upgrade_to_v26( - db: Arc>, -) -> Result, Error> { - let ops = if db.spec.is_peer_das_scheduled() { - match db.get_item::(&CUSTODY_DB_KEY) { - Ok(Some(PersistedCustodyV24(ssz_v24))) => { - info!("Migrating `CustodyContext` to v26 schema"); - let custody_context_v2 = CustodyContextSsz { - validator_custody_at_head: ssz_v24.validator_custody_at_head, - persisted_is_supernode: ssz_v24.persisted_is_supernode, - epoch_validator_custody_requirements: vec![], - }; - vec![KeyValueStoreOp::PutKeyValue( - DBColumn::CustodyContext, - CUSTODY_DB_KEY.as_slice().to_vec(), - PersistedCustody(custody_context_v2).as_store_bytes(), - )] - } - _ => { - vec![] - } - } - } else { - // Delete it from db if PeerDAS hasn't been scheduled - vec![KeyValueStoreOp::DeleteKey( - DBColumn::CustodyContext, - CUSTODY_DB_KEY.as_slice().to_vec(), - )] - }; - - Ok(ops) -} - -pub fn downgrade_from_v26( - db: Arc>, -) -> Result, Error> { - let res = db.get_item::(&CUSTODY_DB_KEY); - let ops = match res { - Ok(Some(PersistedCustody(ssz_v26))) => { - info!("Migrating `CustodyContext` back from v26 schema"); - let custody_context_v24 = CustodyContextSszV24 { - validator_custody_at_head: ssz_v26.validator_custody_at_head, - persisted_is_supernode: ssz_v26.persisted_is_supernode, - }; - vec![KeyValueStoreOp::PutKeyValue( - DBColumn::CustodyContext, - CUSTODY_DB_KEY.as_slice().to_vec(), - PersistedCustodyV24(custody_context_v24).as_store_bytes(), - )] - } - _ => { - // no op if it's not on the db, as previous versions gracefully handle data missing from disk. 
- vec![] - } - }; - - Ok(ops) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs deleted file mode 100644 index fbe865ee27..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs +++ /dev/null @@ -1,26 +0,0 @@ -use crate::BeaconChainTypes; -use std::sync::Arc; -use store::{Error, HotColdDB, metadata::SchemaVersion}; - -/// Add `DataColumnCustodyInfo` entry to v27. -pub fn upgrade_to_v27( - db: Arc>, -) -> Result<(), Error> { - if db.spec.is_peer_das_scheduled() { - db.put_data_column_custody_info(None)?; - db.store_schema_version_atomically(SchemaVersion(27), vec![])?; - } - - Ok(()) -} - -pub fn downgrade_from_v27( - db: Arc>, -) -> Result<(), Error> { - if db.spec.is_peer_das_scheduled() { - return Err(Error::MigrationError( - "Cannot downgrade from v27 if peerDAS is scheduled".to_string(), - )); - } - Ok(()) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs deleted file mode 100644 index 5885eaabc0..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::{ - BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, PersistedForkChoiceStoreV17, - beacon_chain::FORK_CHOICE_DB_KEY, - persisted_fork_choice::{PersistedForkChoiceV17, PersistedForkChoiceV28}, - summaries_dag::{DAGStateSummary, StateSummariesDAG}, -}; -use fork_choice::{ForkChoice, ForkChoiceStore, ResetPayloadStatuses}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use tracing::{info, warn}; -use types::{EthSpec, Hash256}; - -/// Upgrade `PersistedForkChoice` from V17 to V28. -pub fn upgrade_to_v28( - db: Arc>, -) -> Result, Error> { - let Some(persisted_fork_choice_v17) = - db.get_item::(&FORK_CHOICE_DB_KEY)? 
- else { - warn!("No fork choice found to upgrade to v28"); - return Ok(vec![]); - }; - - // Load state DAG in order to compute justified checkpoint roots. - let state_summaries_dag = { - let state_summaries = db - .load_hot_state_summaries()? - .into_iter() - .map(|(state_root, summary)| (state_root, summary.into())) - .collect::>(); - - StateSummariesDAG::new(state_summaries).map_err(|e| { - Error::MigrationError(format!("Error loading state summaries DAG: {e:?}")) - })? - }; - - // Determine the justified state roots. - let justified_checkpoint = persisted_fork_choice_v17 - .fork_choice_store_v17 - .justified_checkpoint; - let justified_block_root = justified_checkpoint.root; - let justified_slot = justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let justified_state_root = state_summaries_dag - .state_root_at_slot(justified_block_root, justified_slot) - .ok_or_else(|| { - Error::MigrationError(format!( - "Missing state root for justified slot {justified_slot} with latest_block_root \ - {justified_block_root:?}" - )) - })?; - - let unrealized_justified_checkpoint = persisted_fork_choice_v17 - .fork_choice_store_v17 - .unrealized_justified_checkpoint; - let unrealized_justified_block_root = unrealized_justified_checkpoint.root; - let unrealized_justified_slot = unrealized_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let unrealized_justified_state_root = state_summaries_dag - .state_root_at_slot(unrealized_justified_block_root, unrealized_justified_slot) - .ok_or_else(|| { - Error::MigrationError(format!( - "Missing state root for unrealized justified slot {unrealized_justified_slot} \ - with latest_block_root {unrealized_justified_block_root:?}" - )) - })?; - - let fc_store = BeaconForkChoiceStore::from_persisted_v17( - persisted_fork_choice_v17.fork_choice_store_v17, - justified_state_root, - unrealized_justified_state_root, - db.clone(), - ) - .map_err(|e| { - Error::MigrationError(format!( - "Error 
loading fork choice store from persisted: {e:?}" - )) - })?; - - info!( - ?justified_state_root, - %justified_slot, - "Added justified state root to fork choice" - ); - - // Construct top-level ForkChoice struct using the patched fork choice store, and the converted - // proto array. - let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let fork_choice = ForkChoice::from_persisted( - persisted_fork_choice_v17.fork_choice_v17.try_into()?, - reset_payload_statuses, - fc_store, - db.get_chain_spec(), - ) - .map_err(|e| Error::MigrationError(format!("Unable to build ForkChoice: {e:?}")))?; - - let ops = vec![BeaconChain::::persist_fork_choice_in_batch_standalone( - &fork_choice, - db.get_config(), - )?]; - - info!("Upgraded fork choice for DB schema v28"); - - Ok(ops) -} - -pub fn downgrade_from_v28( - db: Arc>, -) -> Result, Error> { - let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let Some(fork_choice) = - BeaconChain::::load_fork_choice(db.clone(), reset_payload_statuses, db.get_chain_spec()) - .map_err(|e| Error::MigrationError(format!("Unable to load fork choice: {e:?}")))? - else { - warn!("No fork choice to downgrade"); - return Ok(vec![]); - }; - - // Recreate V28 persisted fork choice, then convert each field back to its V17 version. - let persisted_fork_choice = PersistedForkChoiceV28 { - fork_choice: fork_choice.to_persisted(), - fork_choice_store: fork_choice.fc_store().to_persisted(), - }; - - let justified_balances = fork_choice.fc_store().justified_balances(); - - // 1. Create `proto_array::PersistedForkChoiceV17`. 
- let fork_choice_v17: fork_choice::PersistedForkChoiceV17 = ( - persisted_fork_choice.fork_choice, - justified_balances.clone(), - ) - .into(); - - let fork_choice_store_v17: PersistedForkChoiceStoreV17 = ( - persisted_fork_choice.fork_choice_store, - justified_balances.clone(), - ) - .into(); - - let persisted_fork_choice_v17 = PersistedForkChoiceV17 { - fork_choice_v17, - fork_choice_store_v17, - }; - - let ops = vec![persisted_fork_choice_v17.as_kv_store_op(FORK_CHOICE_DB_KEY)]; - - info!("Downgraded fork choice for DB schema v28"); - - Ok(ops) -} diff --git a/beacon_node/beacon_chain/src/summaries_dag.rs b/beacon_node/beacon_chain/src/summaries_dag.rs index 4ddcdaab5a..50fc0b3820 100644 --- a/beacon_node/beacon_chain/src/summaries_dag.rs +++ b/beacon_node/beacon_chain/src/summaries_dag.rs @@ -14,14 +14,6 @@ pub struct DAGStateSummary { pub previous_state_root: Hash256, } -#[derive(Debug, Clone, Copy)] -pub struct DAGStateSummaryV22 { - pub slot: Slot, - pub latest_block_root: Hash256, - pub block_slot: Slot, - pub block_parent_root: Hash256, -} - pub struct StateSummariesDAG { // state_root -> state_summary state_summaries_by_state_root: HashMap, @@ -40,10 +32,6 @@ pub enum Error { new_state_summary: (Slot, Hash256), }, MissingStateSummary(Hash256), - MissingStateSummaryByBlockRoot { - state_root: Hash256, - latest_block_root: Hash256, - }, MissingChildStateRoot(Hash256), RequestedSlotAboveSummary { starting_state_root: Hash256, @@ -109,89 +97,6 @@ impl StateSummariesDAG { }) } - /// Computes a DAG from a sequence of state summaries, including their parent block - /// relationships. - /// - /// - Expects summaries to be contiguous per slot: there must exist a summary at every slot - /// of each tree branch - /// - Maybe include multiple disjoint trees. The root of each tree will have a ZERO parent state - /// root, which will error later when calling `previous_state_root`. 
- pub fn new_from_v22( - state_summaries_v22: Vec<(Hash256, DAGStateSummaryV22)>, - ) -> Result { - // Group them by latest block root, and sorted state slot - let mut state_summaries_by_block_root = HashMap::<_, BTreeMap<_, _>>::new(); - for (state_root, summary) in state_summaries_v22.iter() { - let summaries = state_summaries_by_block_root - .entry(summary.latest_block_root) - .or_default(); - - // Sanity check to ensure no duplicate summaries for the tuple (block_root, state_slot) - match summaries.entry(summary.slot) { - Entry::Vacant(entry) => { - entry.insert((state_root, summary)); - } - Entry::Occupied(existing) => { - return Err(Error::DuplicateStateSummary { - block_root: summary.latest_block_root, - existing_state_summary: (summary.slot, *state_root).into(), - new_state_summary: (*existing.key(), *existing.get().0), - }); - } - } - } - - let state_summaries = state_summaries_v22 - .iter() - .map(|(state_root, summary)| { - let previous_state_root = if summary.slot == 0 { - Hash256::ZERO - } else { - let previous_slot = summary.slot - 1; - - // Check the set of states in the same state's block root - let same_block_root_summaries = state_summaries_by_block_root - .get(&summary.latest_block_root) - // Should never error: we construct the HashMap here and must have at least - // one entry per block root - .ok_or(Error::MissingStateSummaryByBlockRoot { - state_root: *state_root, - latest_block_root: summary.latest_block_root, - })?; - if let Some((state_root, _)) = same_block_root_summaries.get(&previous_slot) { - // Skipped slot: block root at previous slot is the same as latest block root. - **state_root - } else { - // Common case: not a skipped slot. - // - // If we can't find a state summmary for the parent block and previous slot, - // then there is some amount of disjointedness in the DAG. We set the parent - // state root to 0x0 in this case, and will prune any dangling states. 
- let parent_block_root = summary.block_parent_root; - state_summaries_by_block_root - .get(&parent_block_root) - .and_then(|parent_block_summaries| { - parent_block_summaries.get(&previous_slot) - }) - .map_or(Hash256::ZERO, |(parent_state_root, _)| **parent_state_root) - } - }; - - Ok(( - *state_root, - DAGStateSummary { - slot: summary.slot, - latest_block_root: summary.latest_block_root, - latest_block_slot: summary.block_slot, - previous_state_root, - }, - )) - }) - .collect::, _>>()?; - - Self::new(state_summaries) - } - // Returns all non-unique latest block roots of a given set of states pub fn blocks_of_states<'a, I: Iterator>( &self, @@ -379,106 +284,3 @@ impl From for DAGStateSummary { } } } - -#[cfg(test)] -mod tests { - use super::{DAGStateSummaryV22, Error, StateSummariesDAG}; - use bls::FixedBytesExtended; - use types::{Hash256, Slot}; - - fn root(n: u64) -> Hash256 { - Hash256::from_low_u64_le(n) - } - - #[test] - fn new_from_v22_empty() { - StateSummariesDAG::new_from_v22(vec![]).unwrap(); - } - - fn assert_previous_state_root_is_zero(dag: &StateSummariesDAG, root: Hash256) { - assert!(matches!( - dag.previous_state_root(root).unwrap_err(), - Error::RootUnknownPreviousStateRoot { .. 
} - )); - } - - #[test] - fn new_from_v22_one_state() { - let root_a = root(0xa); - let root_1 = root(1); - let root_2 = root(2); - let summary_1 = DAGStateSummaryV22 { - slot: Slot::new(1), - latest_block_root: root_1, - block_parent_root: root_2, - block_slot: Slot::new(1), - }; - - let dag = StateSummariesDAG::new_from_v22(vec![(root_a, summary_1)]).unwrap(); - - // The parent of the root summary is ZERO - assert_previous_state_root_is_zero(&dag, root_a); - } - - #[test] - fn new_from_v22_multiple_states() { - let dag = StateSummariesDAG::new_from_v22(vec![ - ( - root(0xa), - DAGStateSummaryV22 { - slot: Slot::new(3), - latest_block_root: root(3), - block_parent_root: root(1), - block_slot: Slot::new(3), - }, - ), - ( - root(0xb), - DAGStateSummaryV22 { - slot: Slot::new(4), - latest_block_root: root(4), - block_parent_root: root(3), - block_slot: Slot::new(4), - }, - ), - // fork 1 - ( - root(0xc), - DAGStateSummaryV22 { - slot: Slot::new(5), - latest_block_root: root(5), - block_parent_root: root(4), - block_slot: Slot::new(5), - }, - ), - // fork 2 - // skipped slot - ( - root(0xd), - DAGStateSummaryV22 { - slot: Slot::new(5), - latest_block_root: root(4), - block_parent_root: root(3), - block_slot: Slot::new(4), - }, - ), - // normal slot - ( - root(0xe), - DAGStateSummaryV22 { - slot: Slot::new(6), - latest_block_root: root(6), - block_parent_root: root(4), - block_slot: Slot::new(6), - }, - ), - ]) - .unwrap(); - - // The parent of the root summary is ZERO - assert_previous_state_root_is_zero(&dag, root(0xa)); - assert_eq!(dag.previous_state_root(root(0xc)).unwrap(), root(0xb)); - assert_eq!(dag.previous_state_root(root(0xd)).unwrap(), root(0xb)); - assert_eq!(dag.previous_state_root(root(0xe)).unwrap(), root(0xd)); - } -} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 366d1dc831..c53c29438e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ 
-1,5 +1,5 @@ use crate::blob_verification::GossipVerifiedBlob; -use crate::block_verification_types::{AsBlock, AvailableBlockData, RpcBlock}; +use crate::block_verification_types::{AsBlock, AvailableBlockData, LookupBlock, RangeSyncBlock}; use crate::custody_context::NodeCustodyType; use crate::data_availability_checker::DataAvailabilityChecker; use crate::graffiti_calculator::GraffitiSettings; @@ -49,12 +49,13 @@ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; use ssz_types::{RuntimeVariableList, VariableList}; +use state_processing::ConsensusContext; use state_processing::per_block_processing::compute_timestamp_at_slot; -use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; -use state_processing::state_advance::complete_state_advance; -use state_processing::{ - BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, per_block_processing, +use state_processing::per_block_processing::{ + BlockSignatureStrategy, VerifyBlockRoot, deneb::kzg_commitment_to_versioned_hash, + per_block_processing, }; +use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; @@ -822,20 +823,20 @@ where mock_builder_server } - pub fn get_head_block(&self) -> RpcBlock { + pub fn get_head_block(&self) -> RangeSyncBlock { let block = self.chain.head_beacon_block(); let block_root = block.canonical_root(); - self.build_rpc_block_from_store_blobs(Some(block_root), block) + self.build_range_sync_block_from_store_blobs(Some(block_root), block) } - pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { + pub fn get_full_block(&self, block_root: &Hash256) -> RangeSyncBlock { let block = self .chain .get_blinded_block(block_root) .unwrap() .unwrap_or_else(|| panic!("block root does not exist in harness {block_root:?}")); let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); - 
self.build_rpc_block_from_store_blobs(Some(*block_root), Arc::new(full_block)) + self.build_range_sync_block_from_store_blobs(Some(*block_root), Arc::new(full_block)) } pub fn get_all_validators(&self) -> Vec { @@ -1339,15 +1340,12 @@ where let signed_block = self.sign_beacon_block(block, state); let block_root = signed_block.canonical_root(); - let rpc_block = RpcBlock::BlockOnly { - block_root, - block: Arc::new(signed_block), - }; + let lookup_block = LookupBlock::new(Arc::new(signed_block)); self.chain.slot_clock.set_slot(slot.as_u64()); self.chain .process_block( block_root, - rpc_block, + lookup_block, NotifyExecutionLayer::No, BlockImportSource::Lookup, || Ok(()), @@ -2606,20 +2604,33 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); let is_available = !has_blob_commitments || blob_items.is_some(); + let block_hash: SignedBeaconBlockHash = if !is_available { + self.chain + .process_block( + block_root, + LookupBlock::new(block), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + } else { + let range_sync_block = self.build_range_sync_block_from_blobs(block, blob_items)?; + self.chain + .process_block( + block_root, + range_sync_block, + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + }; - let rpc_block = self.build_rpc_block_from_blobs(block, blob_items, is_available)?; - let block_hash: SignedBeaconBlockHash = self - .chain - .process_block( - block_root, - rpc_block, - NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, - || Ok(()), - ) - .await? 
- .try_into() - .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -2639,19 +2650,33 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); let is_available = !has_blob_commitments || blob_items.is_some(); - let rpc_block = self.build_rpc_block_from_blobs(block, blob_items, is_available)?; - let block_hash: SignedBeaconBlockHash = self - .chain - .process_block( - block_root, - rpc_block, - NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, - || Ok(()), - ) - .await? - .try_into() - .expect("block blobs are available"); + let block_hash: SignedBeaconBlockHash = if is_available { + let range_sync_block = self.build_range_sync_block_from_blobs(block, blob_items)?; + self.chain + .process_block( + block_root, + range_sync_block, + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + } else { + self.chain + .process_block( + block_root, + LookupBlock::new(block), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + }; + self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -2734,13 +2759,13 @@ where state_root } - /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from + /// Builds a `RangeSyncBlock` from a `SignedBeaconBlock` and blobs or data columns retrieved from /// the database. 
- pub fn build_rpc_block_from_store_blobs( + pub fn build_range_sync_block_from_store_blobs( &self, block_root: Option, block: Arc>, - ) -> RpcBlock { + ) -> RangeSyncBlock { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); let has_blobs = block .message() @@ -2748,9 +2773,9 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); if !has_blobs { - return RpcBlock::new( + return RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &self.chain.data_availability_checker, self.chain.spec.clone(), ) @@ -2767,9 +2792,9 @@ where .unwrap(); let custody_columns = columns.into_iter().collect::>(); let block_data = AvailableBlockData::new_with_data_columns(custody_columns); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &self.chain.data_availability_checker, self.chain.spec.clone(), ) @@ -2782,9 +2807,9 @@ where AvailableBlockData::NoData }; - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &self.chain.data_availability_checker, self.chain.spec.clone(), ) @@ -2792,18 +2817,17 @@ where } } - /// Builds an `RpcBlock` from a `SignedBeaconBlock` and `BlobsList`. - pub fn build_rpc_block_from_blobs( + /// Builds a `RangeSyncBlock` from a `SignedBeaconBlock` and `BlobsList`. 
+ pub fn build_range_sync_block_from_blobs( &self, block: Arc>>, blob_items: Option<(KzgProofs, BlobsList)>, - is_available: bool, - ) -> Result, BlockError> { + ) -> Result, BlockError> { Ok(if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { let epoch = block.slot().epoch(E::slots_per_epoch()); let sampling_columns = self.chain.sampling_columns_for_epoch(epoch); - if blob_items.is_some_and(|(_, blobs)| !blobs.is_empty()) { + if blob_items.is_some_and(|(kzg_proofs, _)| !kzg_proofs.is_empty()) { // Note: this method ignores the actual custody columns and just take the first // `sampling_column_count` for testing purpose only, because the chain does not // currently have any knowledge of the columns being custodied. @@ -2811,33 +2835,17 @@ where .into_iter() .filter(|d| sampling_columns.contains(d.index())) .collect::>(); - if is_available { - let block_data = AvailableBlockData::new_with_data_columns(columns); - RpcBlock::new( - block, - Some(block_data), - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? - } else { - RpcBlock::new( - block, - None, - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? - } - } else if is_available { - RpcBlock::new( + let block_data = AvailableBlockData::new_with_data_columns(columns); + RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + block_data, &self.chain.data_availability_checker, self.chain.spec.clone(), )? } else { - RpcBlock::new( + RangeSyncBlock::new( block, - None, + AvailableBlockData::NoData, &self.chain.data_availability_checker, self.chain.spec.clone(), )? @@ -2849,27 +2857,18 @@ where }) .transpose() .unwrap(); - if is_available { - let block_data = if let Some(blobs) = blobs { - AvailableBlockData::new_with_blobs(blobs) - } else { - AvailableBlockData::NoData - }; - - RpcBlock::new( - block, - Some(block_data), - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? 
+ let block_data = if let Some(blobs) = blobs { + AvailableBlockData::new_with_blobs(blobs) } else { - RpcBlock::new( - block, - None, - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? - } + AvailableBlockData::NoData + }; + + RangeSyncBlock::new( + block, + block_data, + &self.chain.data_availability_checker, + self.chain.spec.clone(), + )? }) } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index a1922f32a4..bca60d27cd 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -1,7 +1,6 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_simulator::produce_unaggregated_attestation; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; @@ -223,19 +222,9 @@ async fn produces_attestations() { assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch"); assert_eq!(data.target.root, target_root, "bad target root"); - let rpc_block = - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(block.clone())); - - let available_block = match rpc_block { - RpcBlock::FullyAvailable(available_block) => { - chain - .data_availability_checker - .verify_kzg_for_available_block(&available_block) - .unwrap(); - available_block - } - RpcBlock::BlockOnly { .. 
} => panic!("block should be available"), - }; + let range_sync_block = harness + .build_range_sync_block_from_store_blobs(Some(block_root), Arc::new(block.clone())); + let available_block = range_sync_block.into_available_block(); let early_attestation = { let proto_block = chain @@ -292,20 +281,12 @@ async fn early_attester_cache_old_request() { .get_block(&head.beacon_block_root) .unwrap(); - let rpc_block = harness - .build_rpc_block_from_store_blobs(Some(head.beacon_block_root), head.beacon_block.clone()); - - let available_block = match rpc_block { - RpcBlock::FullyAvailable(available_block) => { - harness - .chain - .data_availability_checker - .verify_kzg_for_available_block(&available_block) - .unwrap(); - available_block - } - RpcBlock::BlockOnly { .. } => panic!("block should be available"), - }; + let available_block = harness + .build_range_sync_block_from_store_blobs( + Some(head.beacon_block_root), + head.beacon_block.clone(), + ) + .into_available_block(); harness .chain diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index e8ee628f28..acf326430b 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1,4 +1,5 @@ #![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] use beacon_chain::attestation_verification::{ Error, batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, diff --git a/beacon_node/beacon_chain/tests/blob_verification.rs b/beacon_node/beacon_chain/tests/blob_verification.rs index ee61177b2a..0ee9a7dba6 100644 --- a/beacon_node/beacon_chain/tests/blob_verification.rs +++ b/beacon_node/beacon_chain/tests/blob_verification.rs @@ -5,7 +5,7 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, - block_verification_types::AsBlock, + 
block_verification_types::{AsBlock, LookupBlock}, }; use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; @@ -76,14 +76,11 @@ async fn rpc_blobs_with_invalid_header_signature() { // Process the block without blobs so that it doesn't become available. harness.advance_slot(); - let rpc_block = harness - .build_rpc_block_from_blobs(signed_block.clone(), None, false) - .unwrap(); let availability = harness .chain .process_block( block_root, - rpc_block, + LookupBlock::new(signed_block.clone()), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index e385e0dc48..2bb60f111a 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,6 +1,6 @@ #![cfg(not(debug_assertions))] // TODO(gloas) we probably need similar test for payload envelope verification -use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; +use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, LookupBlock, RangeSyncBlock}; use beacon_chain::data_availability_checker::{AvailabilityCheckError, AvailableBlockData}; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::{ @@ -13,7 +13,7 @@ use beacon_chain::{ }; use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, - InvalidSignature, NotifyExecutionLayer, signature_verify_chain_segment, + InvalidSignature, NotifyExecutionLayer, }; use bls::{AggregateSignature, Keypair, Signature}; use fixed_bytes::FixedBytesExtended; @@ -77,8 +77,10 @@ async fn get_chain_segment() -> (Vec>, Vec( chain_segment: &[BeaconSnapshot], chain_segment_sidecars: &[Option>], chain: Arc>, -) -> Vec> +) -> Vec> where T: BeaconChainTypes, { @@ -145,25 +147,25 @@ where .zip(chain_segment_sidecars.iter()) .map(|(snapshot, data_sidecars)| { let 
block = snapshot.beacon_block.clone(); - build_rpc_block(block, data_sidecars, chain.clone()) + build_range_sync_block(block, data_sidecars, chain.clone()) }) .collect() } -fn build_rpc_block( +fn build_range_sync_block( block: Arc>, data_sidecars: &Option>, chain: Arc>, -) -> RpcBlock +) -> RangeSyncBlock where T: BeaconChainTypes, { match data_sidecars { Some(DataSidecars::Blobs(blobs)) => { let block_data = AvailableBlockData::new_with_blobs(blobs.clone()); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, chain.spec.clone(), ) @@ -176,17 +178,17 @@ where .map(|c| c.as_data_column().clone()) .collect::>(), ); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, chain.spec.clone(), ) .unwrap() } - None => RpcBlock::new( + None => RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &chain.data_availability_checker, chain.spec.clone(), ) @@ -301,7 +303,7 @@ fn update_data_column_signed_header( async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; - let blocks: Vec> = + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -339,7 +341,7 @@ async fn chain_segment_full_segment() { async fn chain_segment_varying_chunk_size() { let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); - let blocks: Vec> = + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -384,7 +386,7 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a block removed. 
*/ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -405,7 +407,7 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. */ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -413,9 +415,9 @@ async fn chain_segment_non_linear_parent_roots() { let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); - blocks[3] = RpcBlock::new( + blocks[3] = RangeSyncBlock::new( Arc::new(SignedBeaconBlock::from_block(block, signature)), - blocks[3].block_data().cloned(), + blocks[3].block_data().clone(), &harness.chain.data_availability_checker, harness.spec.clone(), ) @@ -447,15 +449,15 @@ async fn chain_segment_non_linear_slots() { * Test where a child is lower than the parent. */ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = Slot::new(0); - blocks[3] = RpcBlock::new( + blocks[3] = RangeSyncBlock::new( Arc::new(SignedBeaconBlock::from_block(block, signature)), - blocks[3].block_data().cloned(), + blocks[3].block_data().clone(), &harness.chain.data_availability_checker, harness.spec.clone(), ) @@ -477,15 +479,15 @@ async fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. 
*/ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); - blocks[3] = RpcBlock::new( + blocks[3] = RangeSyncBlock::new( Arc::new(SignedBeaconBlock::from_block(block, signature)), - blocks[3].block_data().cloned(), + blocks[3].block_data().clone(), &harness.chain.data_availability_checker, harness.chain.spec.clone(), ) @@ -512,11 +514,11 @@ async fn assert_invalid_signature( snapshots: &[BeaconSnapshot], item: &str, ) { - let blocks: Vec> = snapshots + let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); @@ -543,7 +545,7 @@ async fn assert_invalid_signature( .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been @@ -558,7 +560,7 @@ async fn assert_invalid_signature( .chain .process_block( snapshots[block_index].beacon_block.canonical_root(), - build_rpc_block( + build_range_sync_block( snapshots[block_index].beacon_block.clone(), &chain_segment_blobs[block_index], harness.chain.clone(), @@ -620,7 +622,7 @@ async fn invalid_signature_gossip_block() { .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); harness @@ -630,18 +632,12 @@ async fn 
invalid_signature_gossip_block() { .into_block_error() .expect("should import all blocks prior to the one being tested"); let signed_block = SignedBeaconBlock::from_block(block, junk_signature()); - let rpc_block = RpcBlock::new( - Arc::new(signed_block), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let lookup_block = LookupBlock::new(Arc::new(signed_block)); let process_res = harness .chain .process_block( - rpc_block.block_root(), - rpc_block, + lookup_block.block_root(), + lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -675,11 +671,11 @@ async fn invalid_signature_block_proposal() { block.clone(), junk_signature(), )); - let blocks: Vec> = snapshots + let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect::>(); // Ensure the block will be rejected if imported in a chain segment. @@ -994,11 +990,11 @@ async fn invalid_signature_deposit() { Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); - let blocks: Vec> = snapshots + let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); assert!( @@ -1641,9 +1637,9 @@ async fn add_base_block_to_altair_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. 
- let base_rpc_block = RpcBlock::new( + let base_range_sync_block = RangeSyncBlock::new( Arc::new(base_block.clone()), - None, + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone(), ) @@ -1652,8 +1648,8 @@ async fn add_base_block_to_altair_chain() { harness .chain .process_block( - base_rpc_block.block_root(), - base_rpc_block, + base_range_sync_block.block_root(), + base_range_sync_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1672,9 +1668,9 @@ async fn add_base_block_to_altair_chain() { .chain .process_chain_segment( vec![ - RpcBlock::new( + RangeSyncBlock::new( Arc::new(base_block), - None, + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone() ) @@ -1792,19 +1788,13 @@ async fn add_altair_block_to_base_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. - let altair_rpc_block = RpcBlock::new( - Arc::new(altair_block.clone()), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let altair_lookup_block = LookupBlock::new(Arc::new(altair_block.clone())); assert!(matches!( harness .chain .process_block( - altair_rpc_block.block_root(), - altair_rpc_block, + altair_lookup_block.block_root(), + altair_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1823,9 +1813,9 @@ async fn add_altair_block_to_base_chain() { .chain .process_chain_segment( vec![ - RpcBlock::new( + RangeSyncBlock::new( Arc::new(altair_block), - None, + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone() ) @@ -1891,18 +1881,18 @@ async fn import_duplicate_block_unrealized_justification() { // Create two verified variants of the block, representing the same block being processed in // parallel. 
let notify_execution_layer = NotifyExecutionLayer::Yes; - let rpc_block = RpcBlock::new( + let range_sync_block = RangeSyncBlock::new( block.clone(), - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone(), ) .unwrap(); - let verified_block1 = rpc_block + let verified_block1 = range_sync_block .clone() .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); - let verified_block2 = rpc_block + let verified_block2 = range_sync_block .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); @@ -1972,48 +1962,9 @@ async fn import_execution_pending_block( } } -// Test that `signature_verify_chain_segment` errors with a chain segment of mixed `FullyAvailable` -// and `BlockOnly` RpcBlocks. This situation should never happen in production. -#[tokio::test] -async fn signature_verify_mixed_rpc_block_variants() { - let (snapshots, data_sidecars) = get_chain_segment().await; - let snapshots: Vec<_> = snapshots.into_iter().take(10).collect(); - let data_sidecars: Vec<_> = data_sidecars.into_iter().take(10).collect(); - - let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); - - let mut chain_segment = Vec::new(); - - for (i, (snapshot, blobs)) in snapshots.iter().zip(data_sidecars.iter()).enumerate() { - let block = snapshot.beacon_block.clone(); - let block_root = snapshot.beacon_block_root; - - // Alternate between FullyAvailable and BlockOnly - let rpc_block = if i % 2 == 0 { - // FullyAvailable - with blobs/columns if needed - build_rpc_block(block, blobs, harness.chain.clone()) - } else { - // BlockOnly - no data - RpcBlock::new( - block, - None, - &harness.chain.data_availability_checker, - harness.chain.spec.clone(), - ) - .unwrap() - }; - - chain_segment.push((block_root, rpc_block)); - } - - // This should error because `signature_verify_chain_segment` expects a list - // of `RpcBlock::FullyAvailable`. 
- assert!(signature_verify_chain_segment(chain_segment.clone(), &harness.chain).is_err()); -} - // Test that RpcBlock::new() rejects blocks when blob count doesn't match expected. #[tokio::test] -async fn rpc_block_construction_fails_with_wrong_blob_count() { +async fn range_sync_block_construction_fails_with_wrong_blob_count() { let spec = test_spec::(); if !spec.fork_name_at_slot::(Slot::new(0)).deneb_enabled() @@ -2064,9 +2015,9 @@ async fn rpc_block_construction_fails_with_wrong_blob_count() { let block_data = AvailableBlockData::new_with_blobs(wrong_blobs); // Try to create RpcBlock with wrong blob count - let result = RpcBlock::new( + let result = RangeSyncBlock::new( Arc::new(block), - Some(block_data), + block_data, &harness.chain.data_availability_checker, harness.chain.spec.clone(), ); @@ -2086,7 +2037,7 @@ async fn rpc_block_construction_fails_with_wrong_blob_count() { // Test that RpcBlock::new() rejects blocks when custody columns are incomplete. #[tokio::test] -async fn rpc_block_rejects_missing_custody_columns() { +async fn range_sync_block_rejects_missing_custody_columns() { let spec = test_spec::(); if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() { @@ -2139,9 +2090,9 @@ async fn rpc_block_rejects_missing_custody_columns() { let block_data = AvailableBlockData::new_with_data_columns(incomplete_columns); // Try to create RpcBlock with incomplete custody columns - let result = RpcBlock::new( + let result = RangeSyncBlock::new( Arc::new(block), - Some(block_data), + block_data, &harness.chain.data_availability_checker, harness.chain.spec.clone(), ); @@ -2227,9 +2178,9 @@ async fn rpc_block_allows_construction_past_da_boundary() { // Try to create RpcBlock with NoData for a block past DA boundary // This should succeed since columns are not expected for blocks past DA boundary - let result = RpcBlock::new( + let result = RangeSyncBlock::new( Arc::new(block), - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, 
&harness.chain.data_availability_checker, harness.chain.spec.clone(), ); diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 9941c957e2..6114bd7f45 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -7,7 +7,7 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, - block_verification_types::AsBlock, + block_verification_types::{AsBlock, LookupBlock}, }; use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; @@ -80,16 +80,13 @@ async fn rpc_columns_with_invalid_header_signature() { // Process the block without blobs so that it doesn't become available. harness.advance_slot(); - let rpc_block = harness - .build_rpc_block_from_blobs(signed_block.clone(), None, false) - .unwrap(); let availability = harness .chain .process_block( block_root, - rpc_block, + LookupBlock::new(signed_block.clone()), NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -169,16 +166,13 @@ async fn verify_header_signature_fork_block_bug() { // The block will be accepted but won't become the head because it's not fully available. // This keeps the head at the pre-fork state (Electra). 
harness.advance_slot(); - let rpc_block = harness - .build_rpc_block_from_blobs(signed_block.clone(), None, false) - .expect("Should build RPC block"); let availability = harness .chain .process_block( block_root, - rpc_block, + LookupBlock::new(signed_block.clone()), NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index b282adecd5..3ed8f59838 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,6 +1,7 @@ #![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::{ BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, NotifyExecutionLayer, StateSkipConfig, @@ -685,19 +686,13 @@ async fn invalidates_all_descendants() { assert_eq!(fork_parent_state.slot(), fork_parent_slot); let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; - let fork_rpc_block = RpcBlock::new( - fork_block.clone(), - None, - &rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let fork_lookup_block = LookupBlock::new(fork_block.clone()); let fork_block_root = rig .harness .chain .process_block( - fork_rpc_block.block_root(), - fork_rpc_block, + fork_lookup_block.block_root(), + fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -795,19 +790,13 @@ async fn switches_heads() { let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); - let fork_rpc_block = RpcBlock::new( - fork_block.clone(), - None, - 
&rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let fork_lookup_block = LookupBlock::new(fork_block.clone()); let fork_block_root = rig .harness .chain .process_block( - fork_rpc_block.block_root(), - fork_rpc_block, + fork_lookup_block.block_root(), + fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1085,15 +1074,9 @@ async fn invalid_parent() { )); // Ensure the block built atop an invalid payload is invalid for import. - let rpc_block = RpcBlock::new( - block.clone(), - None, - &rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let lookup_block = LookupBlock::new(block.clone()); assert!(matches!( - rig.harness.chain.process_block(rpc_block.block_root(), rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, + rig.harness.chain.process_block(lookup_block.block_root(), lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) @@ -1347,18 +1330,12 @@ async fn recover_from_invalid_head_by_importing_blocks() { } = InvalidHeadSetup::new().await; // Import the fork block, it should become the head. 
- let fork_rpc_block = RpcBlock::new( - fork_block.clone(), - None, - &rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let fork_lookup_block = LookupBlock::new(fork_block.clone()); rig.harness .chain .process_block( - fork_rpc_block.block_root(), - fork_rpc_block, + fork_lookup_block.block_root(), + fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 9fa6eaf2e6..ce5864a9d4 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1,7 +1,8 @@ #![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] use beacon_chain::attestation_verification::Error as AttnError; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::custody_context::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS; use beacon_chain::data_availability_checker::AvailableBlock; @@ -147,6 +148,22 @@ fn get_harness_generic( harness } +/// Check that all database invariants hold. +/// +/// Panics with a descriptive message if any invariant is violated. 
+fn check_db_invariants(harness: &TestHarness) { + let result = harness + .chain + .check_database_invariants() + .expect("invariant check should not error"); + + assert!( + result.is_ok(), + "database invariant violations found:\n{:#?}", + result.violations, + ); +} + fn get_states_descendant_of_block( store: &HotColdDB, BeaconNodeBackend>, block_root: Hash256, @@ -307,6 +324,7 @@ async fn full_participation_no_skips() { check_split_slot(&harness, store); check_chain_dump(&harness, num_blocks_produced + 1); check_iterators(&harness); + check_db_invariants(&harness); } #[tokio::test] @@ -351,6 +369,7 @@ async fn randomised_skips() { check_split_slot(&harness, store.clone()); check_chain_dump(&harness, num_blocks_produced + 1); check_iterators(&harness); + check_db_invariants(&harness); } #[tokio::test] @@ -399,6 +418,7 @@ async fn long_skip() { check_split_slot(&harness, store); check_chain_dump(&harness, initial_blocks + final_blocks + 1); check_iterators(&harness); + check_db_invariants(&harness); } /// Go forward to the point where the genesis randao value is no longer part of the vector. 
@@ -1773,6 +1793,8 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { } assert!(!rig.knows_head(&stray_head)); + + check_db_invariants(&rig); } #[tokio::test] @@ -1901,6 +1923,8 @@ async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { assert!(!rig.knows_head(&stray_head)); let chain_dump = rig.chain.chain_dump().unwrap(); assert!(get_blocks(&chain_dump).contains(&shared_head)); + + check_db_invariants(&rig); } #[tokio::test] @@ -1992,6 +2016,8 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { } rig.assert_knows_head(stray_head.into()); + + check_db_invariants(&rig); } #[tokio::test] @@ -2131,6 +2157,8 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { } assert!(!rig.knows_head(&stray_head)); + + check_db_invariants(&rig); } // This is to check if state outside of normal block processing are pruned correctly. @@ -2381,6 +2409,8 @@ async fn finalizes_non_epoch_start_slot() { state_hash ); } + + check_db_invariants(&rig); } fn check_all_blocks_exist<'a>( @@ -2647,6 +2677,8 @@ async fn pruning_test( check_all_states_exist(&harness, all_canonical_states.iter()); check_no_states_exist(&harness, stray_states.difference(&all_canonical_states)); check_no_blocks_exist(&harness, stray_blocks.values()); + + check_db_invariants(&harness); } #[tokio::test] @@ -2711,6 +2743,8 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { vec![(genesis_state_root, Slot::new(0))], "get_states_descendant_of_block({bad_block_parent_root:?})" ); + + check_db_invariants(&harness); } #[tokio::test] @@ -3110,7 +3144,10 @@ async fn weak_subjectivity_sync_test( beacon_chain .process_block( full_block_root, - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), + harness.build_range_sync_block_from_store_blobs( + Some(block_root), + Arc::new(full_block), + ), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -3180,20 +3217,16 @@ async fn 
weak_subjectivity_sync_test( .expect("should get block") .expect("should get block"); - let rpc_block = - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)); + let range_sync_block = harness + .build_range_sync_block_from_store_blobs(Some(block_root), Arc::new(full_block)); - match rpc_block { - RpcBlock::FullyAvailable(available_block) => { - harness - .chain - .data_availability_checker - .verify_kzg_for_available_block(&available_block) - .expect("should verify kzg"); - available_blocks.push(available_block); - } - RpcBlock::BlockOnly { .. } => panic!("Should be an available block"), - } + let fully_available_block = range_sync_block.into_available_block(); + harness + .chain + .data_availability_checker + .verify_kzg_for_available_block(&fully_available_block) + .expect("should verify kzg"); + available_blocks.push(fully_available_block); } // Corrupt the signature on the 1st block to ensure that the backfill processor is checking @@ -3365,6 +3398,16 @@ async fn weak_subjectivity_sync_test( store.clone().reconstruct_historic_states(None).unwrap(); assert_eq!(store.get_anchor_info().anchor_slot, wss_aligned_slot); assert_eq!(store.get_anchor_info().state_upper_limit, Slot::new(0)); + + // Check database invariants after full checkpoint sync + backfill + reconstruction. 
+ let result = beacon_chain + .check_database_invariants() + .expect("invariant check should not error"); + assert!( + result.is_ok(), + "database invariant violations:\n{:#?}", + result.violations, + ); } // This test prunes data columns from epoch 0 and then tries to re-import them via @@ -3754,19 +3797,13 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert_eq!(split.block_root, valid_fork_block.parent_root()); assert_ne!(split.state_root, unadvanced_split_state_root); - let invalid_fork_rpc_block = RpcBlock::new( - invalid_fork_block.clone(), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let invalid_fork_lookup_block = LookupBlock::new(invalid_fork_block.clone()); // Applying the invalid block should fail. let err = harness .chain .process_block( - invalid_fork_rpc_block.block_root(), - invalid_fork_rpc_block, + invalid_fork_lookup_block.block_root(), + invalid_fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -3776,18 +3813,12 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. })); // Applying the valid block should succeed, but it should not become head. 
- let valid_fork_rpc_block = RpcBlock::new( - valid_fork_block.clone(), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let valid_fork_lookup_block = LookupBlock::new(valid_fork_block.clone()); harness .chain .process_block( - valid_fork_rpc_block.block_root(), - valid_fork_rpc_block, + valid_fork_lookup_block.block_root(), + valid_fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -3964,11 +3995,7 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo ) .await; - let min_version = if spec.is_fulu_scheduled() { - SchemaVersion(27) - } else { - SchemaVersion(22) - }; + let min_version = CURRENT_SCHEMA_VERSION; // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); @@ -5568,6 +5595,7 @@ async fn test_gloas_block_and_envelope_storage_generic( "slot = {slot}" ); } + check_db_invariants(&harness); } /// Test that Pending and Full states have the correct payload status through round-trip @@ -5635,6 +5663,7 @@ async fn test_gloas_state_payload_status() { state = full_state; } + check_db_invariants(&harness); } /// Test block replay with and without envelopes. @@ -5774,6 +5803,7 @@ async fn test_gloas_block_replay_with_envelopes() { replayed_full, expected_full, "replayed full state should match stored full state" ); + check_db_invariants(&harness); } /// Test the hot state hierarchy with Full states stored as ReplayFrom. @@ -5791,7 +5821,7 @@ async fn test_gloas_hot_state_hierarchy() { // 40 slots covers 5 epochs. 
let num_blocks = E::slots_per_epoch() * 5; // TODO(gloas): enable finalisation by increasing this threshold - let some_validators = (0..LOW_VALIDATOR_COUNT / 2).collect::>(); + let some_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); @@ -5855,6 +5885,7 @@ async fn test_gloas_hot_state_hierarchy() { // Verify chain dump and iterators work with Gloas states. check_chain_dump(&harness, num_blocks + 1); check_iterators(&harness); + check_db_invariants(&harness); } /// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch. diff --git a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs index 79596bb4a6..38306b3bb6 100644 --- a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs @@ -102,7 +102,7 @@ pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. EarlyBlock(QueuedGossipBlock), /// An execution payload envelope that references a block not yet in fork choice. - UnknownBlockEnvelope(QueuedGossipEnvelope), + UnknownBlockForEnvelope(QueuedGossipEnvelope), /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same /// hash until the gossip block is imported. RpcBlock(QueuedRpcBlock), @@ -535,25 +535,38 @@ impl ReprocessQueue { } // An envelope that references an unknown block. Queue it until the block is // imported, or until the timeout expires. - InboundEvent::Msg(UnknownBlockEnvelope(queued_envelope)) => { + InboundEvent::Msg(UnknownBlockForEnvelope(queued_envelope)) => { let block_root = queued_envelope.beacon_block_root; + // TODO(gloas): Perform lightweight pre-validation before queuing + // (e.g. 
verify builder signature) to prevent unsigned garbage from + // consuming queue slots. + // Don't add the same envelope to the queue twice. This prevents DoS attacks. if self.awaiting_envelopes_per_root.contains_key(&block_root) { + trace!( + ?block_root, + "Duplicate envelope for same block root, dropping" + ); return; } - // Check to ensure this won't over-fill the queue. + // When the queue is full, evict the oldest entry to make room for newer envelopes. if self.awaiting_envelopes_per_root.len() >= MAXIMUM_QUEUED_ENVELOPES { if self.envelope_delay_debounce.elapsed() { warn!( queue_size = MAXIMUM_QUEUED_ENVELOPES, msg = "system resources may be saturated", - "Envelope delay queue is full" + "Envelope delay queue is full, evicting oldest entry" ); } - // Drop the envelope. - return; + if let Some(oldest_root) = + self.awaiting_envelopes_per_root.keys().next().copied() + && let Some((_envelope, delay_key)) = + self.awaiting_envelopes_per_root.remove(&oldest_root) + { + self.envelope_delay_queue.remove(&delay_key); + } } // Register the timeout. @@ -892,12 +905,18 @@ impl ReprocessQueue { InboundEvent::ReadyEnvelope(block_root) => { if let Some((envelope, _delay_key)) = self.awaiting_envelopes_per_root.remove(&block_root) - && self + { + debug!( + ?block_root, + "Envelope timed out waiting for block, sending for processing" + ); + if self .ready_work_tx .try_send(ReadyWork::Envelope(envelope)) .is_err() - { - error!(?block_root, "Failed to send envelope after timeout"); + { + error!(?block_root, "Failed to send envelope after timeout"); + } } } InboundEvent::ReadyAttestation(queued_id) => { @@ -1442,4 +1461,163 @@ mod tests { assert_eq!(reconstruction.block_root, block_root); } } + + // Test that envelopes are properly cleaned up from `awaiting_envelopes_per_root` on timeout. 
+ #[tokio::test] + async fn prune_awaiting_envelopes_per_root() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + let beacon_block_root = Hash256::repeat_byte(0xaf); + + // Insert an envelope. + let msg = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root, + process_fn: Box::pin(async {}), + }); + + // Process the event to enter it into the delay queue. + queue.handle_message(InboundEvent::Msg(msg)); + + // Check that it is queued. + assert_eq!(queue.awaiting_envelopes_per_root.len(), 1); + assert!( + queue + .awaiting_envelopes_per_root + .contains_key(&beacon_block_root) + ); + + // Advance time to expire the envelope. + advance_time( + &queue.slot_clock, + queue.slot_clock.slot_duration() * QUEUED_ENVELOPE_DELAY_SLOTS * 2, + ) + .await; + let ready_msg = queue.next().await.unwrap(); + assert!(matches!(ready_msg, InboundEvent::ReadyEnvelope(_))); + queue.handle_message(ready_msg); + + // The entry for the block root should be gone. + assert!(queue.awaiting_envelopes_per_root.is_empty()); + } + + #[tokio::test] + async fn envelope_released_on_block_imported() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + let beacon_block_root = Hash256::repeat_byte(0xaf); + let parent_root = Hash256::repeat_byte(0xab); + + // Insert an envelope. + let msg = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root, + process_fn: Box::pin(async {}), + }); + + // Process the event to enter it into the delay queue. + queue.handle_message(InboundEvent::Msg(msg)); + + // Check that it is queued. + assert_eq!(queue.awaiting_envelopes_per_root.len(), 1); + + // Simulate block import. 
+ let imported = ReprocessQueueMessage::BlockImported { + block_root: beacon_block_root, + parent_root, + }; + queue.handle_message(InboundEvent::Msg(imported)); + + // The entry for the block root should be gone. + assert!(queue.awaiting_envelopes_per_root.is_empty()); + // Delay queue entry should also be cancelled. + assert_eq!(queue.envelope_delay_queue.len(), 0); + } + + #[tokio::test] + async fn envelope_dedup_drops_second() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + let beacon_block_root = Hash256::repeat_byte(0xaf); + + // Insert an envelope. + let msg1 = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root, + process_fn: Box::pin(async {}), + }); + let msg2 = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root, + process_fn: Box::pin(async {}), + }); + + // Process both events. + queue.handle_message(InboundEvent::Msg(msg1)); + queue.handle_message(InboundEvent::Msg(msg2)); + + // Only one should be queued. + assert_eq!(queue.awaiting_envelopes_per_root.len(), 1); + assert_eq!(queue.envelope_delay_queue.len(), 1); + } + + #[tokio::test] + async fn envelope_capacity_evicts_oldest() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + // Fill the queue to capacity. 
+ for i in 0..MAXIMUM_QUEUED_ENVELOPES { + let block_root = Hash256::repeat_byte(i as u8); + let msg = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root: block_root, + process_fn: Box::pin(async {}), + }); + queue.handle_message(InboundEvent::Msg(msg)); + } + assert_eq!( + queue.awaiting_envelopes_per_root.len(), + MAXIMUM_QUEUED_ENVELOPES + ); + + // One more should evict the oldest and insert the new one. + let overflow_root = Hash256::repeat_byte(0xff); + let msg = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root: overflow_root, + process_fn: Box::pin(async {}), + }); + queue.handle_message(InboundEvent::Msg(msg)); + + // Queue should still be at capacity, with the new root present. + assert_eq!( + queue.awaiting_envelopes_per_root.len(), + MAXIMUM_QUEUED_ENVELOPES + ); + assert!( + queue + .awaiting_envelopes_per_root + .contains_key(&overflow_root) + ); + } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d6796f6a05..90968fa213 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -2048,7 +2048,7 @@ fn verify_builder_bid( .cloned() .map(|withdrawals| { Withdrawals::::try_from(withdrawals) - .map_err(InvalidBuilderPayload::SszTypesError) + .map_err(|e| Box::new(InvalidBuilderPayload::SszTypesError(e))) .map(|w| w.tree_hash_root()) }) .transpose()?; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index d7795e6cdf..a66f7a9b55 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -995,7 +995,7 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use kzg::{Bytes48, CellRef, KzgBlobRef, 
trusted_setup::get_trusted_setup}; + use kzg::{CellRef, KzgBlobRef, trusted_setup::get_trusted_setup}; use types::{MainnetEthSpec, MinimalEthSpec}; #[test] @@ -1021,10 +1021,11 @@ mod test { fn validate_blob_bundle_v1() -> Result<(), String> { let kzg = load_kzg()?; let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle_v1::()?; - let kzg_blob = kzg::Blob::from_bytes(blob.as_ref()) - .map(Box::new) - .map_err(|e| format!("Error converting blob to kzg blob: {e:?}"))?; - kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) + let kzg_blob: KzgBlobRef = blob + .as_ref() + .try_into() + .map_err(|e| format!("Error converting blob to kzg blob ref: {e:?}"))?; + kzg.verify_blob_kzg_proof(kzg_blob, kzg_commitment, kzg_proof) .map_err(|e| format!("Invalid blobs bundle: {e:?}")) } @@ -1034,8 +1035,8 @@ mod test { load_test_blobs_bundle_v2::().map(|(commitment, proofs, blob)| { let kzg_blob: KzgBlobRef = blob.as_ref().try_into().unwrap(); ( - vec![Bytes48::from(commitment); proofs.len()], - proofs.into_iter().map(|p| p.into()).collect::>(), + vec![commitment.0; proofs.len()], + proofs.into_iter().map(|p| p.0).collect::>(), kzg.compute_cells(kzg_blob).unwrap(), ) })?; diff --git a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs index c8e7267129..0751830851 100644 --- a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs +++ b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs @@ -132,7 +132,9 @@ pub async fn publish_execution_payload_envelope( let ctx = chain.gossip_verification_context(); let Ok(gossip_verifed_envelope) = GossipVerifiedEnvelope::new(signed_envelope, &ctx) else { warn!(%slot, %beacon_block_root, "Execution payload envelope rejected"); - return Err(warp_utils::reject::custom_bad_request("execution payload envelope rejected, gossip verification".to_string())); + return Err(warp_utils::reject::custom_bad_request( + "execution payload envelope 
rejected, gossip verification".to_string(), + )); }; // Import the envelope locally (runs state transition and notifies the EL). diff --git a/beacon_node/http_api/src/beacon/states.rs b/beacon_node/http_api/src/beacon/states.rs index 50be7211d8..84ef3c1f26 100644 --- a/beacon_node/http_api/src/beacon/states.rs +++ b/beacon_node/http_api/src/beacon/states.rs @@ -3,17 +3,20 @@ use crate::task_spawner::{Priority, TaskSpawner}; use crate::utils::ResponseFilter; use crate::validator::pubkey_to_validator_index; use crate::version::{ - ResponseIncludesVersion, add_consensus_version_header, + ResponseIncludesVersion, add_consensus_version_header, add_ssz_content_type_header, execution_optimistic_finalized_beacon_response, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::{ - ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, - ValidatorsRequestBody, + self as api_types, ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, + ValidatorIndexData, ValidatorsRequestBody, }; +use ssz::Encode; use std::sync::Arc; use types::{AttestationShufflingId, BeaconStateError, CommitteeCache, EthSpec, RelativeEpoch}; use warp::filters::BoxedFilter; +use warp::http::Response; +use warp::hyper::Body; use warp::{Filter, Reply}; use warp_utils::query::multi_key_query; @@ -160,6 +163,67 @@ pub fn get_beacon_state_pending_deposits( .boxed() } +// GET beacon/states/{state_id}/proposer_lookahead +pub fn get_beacon_state_proposer_lookahead( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("proposer_lookahead")) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + 
.map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(lookahead) = state.proposer_lookahead() else { + return Err(warp_utils::reject::custom_bad_request( + "Proposer lookahead is not available for pre-Fulu states" + .to_string(), + )); + }; + + Ok(( + lookahead.to_vec(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(data.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + ValidatorIndexData(data), + ) + .map(|res| warp::reply::json(&res).into_response()), + } + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + // GET beacon/states/{state_id}/randao?epoch pub fn get_beacon_state_randao( beacon_states_path: BeaconStatesPath, diff --git a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index 8a50ec45b0..4737d92079 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -2,6 +2,7 @@ use beacon_chain::store::metadata::CURRENT_SCHEMA_VERSION; use beacon_chain::{BeaconChain, BeaconChainTypes}; use serde::Serialize; use std::sync::Arc; +use store::invariants::InvariantCheckResult; use store::{AnchorInfo, BlobInfo, Split, StoreConfig}; #[derive(Debug, Serialize)] @@ -30,3 +31,11 @@ pub fn info( blob_info, }) } + +pub fn check_invariants( + chain: Arc>, +) -> Result { + chain.check_database_invariants().map_err(|e| { + warp_utils::reject::custom_bad_request(format!("error checking database invariants: {e:?}")) + }) +} diff --git a/beacon_node/http_api/src/lib.rs 
b/beacon_node/http_api/src/lib.rs index fd0c4a3867..58e9d1f502 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::result_large_err)] //! This crate contains a HTTP server which serves the endpoints listed here: //! //! https://github.com/ethereum/beacon-APIs @@ -6,12 +7,9 @@ //! used for development. mod aggregate_attestation; -mod attestation_performance; mod attester_duties; mod beacon; mod block_id; -mod block_packing_efficiency; -mod block_rewards; mod build_block_contents; mod builder_states; mod custody; @@ -264,6 +262,7 @@ pub fn prometheus_metrics() -> warp::filters::log::Log( let get_beacon_state_pending_consolidations = states::get_beacon_state_pending_consolidations(beacon_states_path.clone()); + // GET beacon/states/{state_id}/proposer_lookahead + let get_beacon_state_proposer_lookahead = + states::get_beacon_state_proposer_lookahead(beacon_states_path.clone()); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. 
Given that @@ -1805,8 +1808,16 @@ pub fn serve( let execution_optimistic = chain.is_optimistic_or_invalid_head().unwrap_or_default(); - Ok(api_types::GenericResponse::from(attestation_rewards)) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + let finalized = epoch + 2 + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch; + + Ok(api_types::GenericResponse::from(attestation_rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); @@ -2473,7 +2484,7 @@ pub fn serve( // GET validator/duties/proposer/{epoch} let get_validator_duties_proposer = get_validator_duties_proposer( - eth_v1.clone(), + any_version.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), @@ -3011,6 +3022,19 @@ pub fn serve( }, ); + // GET lighthouse/database/invariants + let get_lighthouse_database_invariants = database_path + .and(warp::path("invariants")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner + .blocking_json_task(Priority::P1, move || database::check_invariants(chain)) + }, + ); + // POST lighthouse/database/reconstruct let post_lighthouse_database_reconstruct = database_path .and(warp::path("reconstruct")) @@ -3074,67 +3098,6 @@ pub fn serve( }, ); - // GET lighthouse/analysis/block_rewards - let get_lighthouse_block_rewards = warp::path("lighthouse") - .and(warp::path("analysis")) - .and(warp::path("block_rewards")) - .and(warp::query::()) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then(|query, task_spawner: TaskSpawner, chain| { - task_spawner.blocking_json_task(Priority::P1, move || { - block_rewards::get_block_rewards(query, chain) - }) - }); - - // POST lighthouse/analysis/block_rewards - let post_lighthouse_block_rewards = warp::path("lighthouse") - .and(warp::path("analysis")) - 
.and(warp::path("block_rewards")) - .and(warp_utils::json::json()) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then(|blocks, task_spawner: TaskSpawner, chain| { - task_spawner.blocking_json_task(Priority::P1, move || { - block_rewards::compute_block_rewards(blocks, chain) - }) - }); - - // GET lighthouse/analysis/attestation_performance/{index} - let get_lighthouse_attestation_performance = warp::path("lighthouse") - .and(warp::path("analysis")) - .and(warp::path("attestation_performance")) - .and(warp::path::param::()) - .and(warp::query::()) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |target, query, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - attestation_performance::get_attestation_performance(target, query, chain) - }) - }, - ); - - // GET lighthouse/analysis/block_packing_efficiency - let get_lighthouse_block_packing_efficiency = warp::path("lighthouse") - .and(warp::path("analysis")) - .and(warp::path("block_packing_efficiency")) - .and(warp::query::()) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |query, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - block_packing_efficiency::get_block_packing_efficiency(query, chain) - }) - }, - ); - let get_events = eth_v1 .clone() .and(warp::path("events")) @@ -3192,9 +3155,6 @@ pub fn serve( api_types::EventTopic::LightClientOptimisticUpdate => { event_handler.subscribe_light_client_optimistic_update() } - api_types::EventTopic::BlockReward => { - event_handler.subscribe_block_reward() - } api_types::EventTopic::AttesterSlashing => { event_handler.subscribe_attester_slashing() } @@ -3329,6 +3289,7 @@ pub fn serve( .uor(get_beacon_state_pending_deposits) .uor(get_beacon_state_pending_partial_withdrawals) 
.uor(get_beacon_state_pending_consolidations) + .uor(get_beacon_state_proposer_lookahead) .uor(get_beacon_headers) .uor(get_beacon_headers_block_id) .uor(get_beacon_block) @@ -3377,14 +3338,12 @@ pub fn serve( .uor(get_lighthouse_validator_inclusion) .uor(get_lighthouse_staking) .uor(get_lighthouse_database_info) + .uor(get_lighthouse_database_invariants) .uor(get_lighthouse_custody_info) - .uor(get_lighthouse_block_rewards) - .uor(get_lighthouse_attestation_performance) .uor(get_beacon_light_client_optimistic_update) .uor(get_beacon_light_client_finality_update) .uor(get_beacon_light_client_bootstrap) .uor(get_beacon_light_client_updates) - .uor(get_lighthouse_block_packing_efficiency) .uor(get_events) .uor(get_expected_withdrawals) .uor(lighthouse_log_events.boxed()) @@ -3429,7 +3388,6 @@ pub fn serve( .uor(post_validator_liveness_epoch) .uor(post_lighthouse_liveness) .uor(post_lighthouse_database_reconstruct) - .uor(post_lighthouse_block_rewards) .uor(post_lighthouse_ui_validator_metrics) .uor(post_lighthouse_ui_validator_info) .uor(post_lighthouse_finalize) diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 1ebb174785..0b0926f955 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -13,13 +13,45 @@ use slot_clock::SlotClock; use tracing::debug; use types::{Epoch, EthSpec, Hash256, Slot}; +/// Selects which dependent root to return in the API response. +/// +/// - `Legacy`: the block root at the last slot of epoch N-1 (v1 behaviour, for backwards compat). +/// - `True`: the fork-aware proposer shuffling decision root (v2 behaviour). Pre-Fulu this equals +/// the legacy root; post-Fulu it uses epoch N-2. +#[derive(Clone, Copy, PartialEq, Eq)] +enum DependentRootSelection { + Legacy, + True, +} + /// The struct that is returned to the requesting HTTP client. 
type ApiDuties = api_types::DutiesResponse>; -/// Handles a request from the HTTP API for proposer duties. +/// Handles a request from the HTTP API for v1 proposer duties. +/// +/// Returns the legacy dependent root (block root at end of epoch N-1) for backwards compatibility. pub fn proposer_duties( request_epoch: Epoch, chain: &BeaconChain, +) -> Result { + proposer_duties_internal(request_epoch, chain, DependentRootSelection::Legacy) +} + +/// Handles a request from the HTTP API for v2 proposer duties. +/// +/// Returns the true fork-aware dependent root. Pre-Fulu this equals the legacy root; post-Fulu it +/// uses epoch N-2 due to deterministic proposer lookahead with `min_seed_lookahead`. +pub fn proposer_duties_v2( + request_epoch: Epoch, + chain: &BeaconChain, +) -> Result { + proposer_duties_internal(request_epoch, chain, DependentRootSelection::True) +} + +fn proposer_duties_internal( + request_epoch: Epoch, + chain: &BeaconChain, + root_selection: DependentRootSelection, ) -> Result { let current_epoch = chain .slot_clock @@ -49,24 +81,29 @@ pub fn proposer_duties( if request_epoch == current_epoch || request_epoch == tolerant_current_epoch { // If we could consider ourselves in the `request_epoch` when allowing for clock disparity // tolerance then serve this request from the cache. - if let Some(duties) = try_proposer_duties_from_cache(request_epoch, chain)? { + if let Some(duties) = try_proposer_duties_from_cache(request_epoch, chain, root_selection)? + { Ok(duties) } else { debug!(%request_epoch, "Proposer cache miss"); - compute_and_cache_proposer_duties(request_epoch, chain) + compute_and_cache_proposer_duties(request_epoch, chain, root_selection) } } else if request_epoch == current_epoch .safe_add(1) .map_err(warp_utils::reject::arith_error)? 
{ - let (proposers, _dependent_root, legacy_dependent_root, execution_status, _fork) = + let (proposers, dependent_root, legacy_dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; + let selected_root = match root_selection { + DependentRootSelection::Legacy => legacy_dependent_root, + DependentRootSelection::True => dependent_root, + }; convert_to_api_response( chain, request_epoch, - legacy_dependent_root, + selected_root, execution_status.is_optimistic_or_invalid(), proposers, ) @@ -84,7 +121,7 @@ pub fn proposer_duties( // request_epoch < current_epoch // // Queries about the past are handled with a slow path. - compute_historic_proposer_duties(request_epoch, chain) + compute_historic_proposer_duties(request_epoch, chain, root_selection) } } @@ -98,6 +135,7 @@ pub fn proposer_duties( fn try_proposer_duties_from_cache( request_epoch: Epoch, chain: &BeaconChain, + root_selection: DependentRootSelection, ) -> Result, warp::reject::Rejection> { let head = chain.canonical_head.cached_head(); let head_block = &head.snapshot.beacon_block; @@ -116,11 +154,14 @@ fn try_proposer_duties_from_cache( .beacon_state .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(warp_utils::reject::beacon_state_error)?; - let legacy_dependent_root = head - .snapshot - .beacon_state - .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) - .map_err(warp_utils::reject::beacon_state_error)?; + let selected_root = match root_selection { + DependentRootSelection::Legacy => head + .snapshot + .beacon_state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?, + DependentRootSelection::True => head_decision_root, + }; let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::unhandled_error)?; @@ 
-134,7 +175,7 @@ fn try_proposer_duties_from_cache( convert_to_api_response( chain, request_epoch, - legacy_dependent_root, + selected_root, execution_optimistic, indices.to_vec(), ) @@ -155,6 +196,7 @@ fn try_proposer_duties_from_cache( fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, + root_selection: DependentRootSelection, ) -> Result { let (indices, dependent_root, legacy_dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) @@ -168,10 +210,14 @@ fn compute_and_cache_proposer_duties( .map_err(BeaconChainError::from) .map_err(warp_utils::reject::unhandled_error)?; + let selected_root = match root_selection { + DependentRootSelection::Legacy => legacy_dependent_root, + DependentRootSelection::True => dependent_root, + }; convert_to_api_response( chain, current_epoch, - legacy_dependent_root, + selected_root, execution_status.is_optimistic_or_invalid(), indices, ) @@ -182,6 +228,7 @@ fn compute_and_cache_proposer_duties( fn compute_historic_proposer_duties( epoch: Epoch, chain: &BeaconChain, + root_selection: DependentRootSelection, ) -> Result { // If the head is quite old then it might still be relevant for a historical request. // @@ -219,9 +266,9 @@ fn compute_historic_proposer_duties( }; // Ensure the state lookup was correct. - if state.current_epoch() != epoch { + if state.current_epoch() != epoch && state.current_epoch() + 1 != epoch { return Err(warp_utils::reject::custom_server_error(format!( - "state epoch {} not equal to request epoch {}", + "state from epoch {} cannot serve request epoch {}", state.current_epoch(), epoch ))); @@ -234,18 +281,18 @@ fn compute_historic_proposer_duties( // We can supply the genesis block root as the block root since we know that the only block that // decides its own root is the genesis block. 
- let legacy_dependent_root = state - .legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root) - .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::unhandled_error)?; + let selected_root = match root_selection { + DependentRootSelection::Legacy => state + .legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root) + .map_err(BeaconChainError::from) + .map_err(warp_utils::reject::unhandled_error)?, + DependentRootSelection::True => state + .proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root, &chain.spec) + .map_err(BeaconChainError::from) + .map_err(warp_utils::reject::unhandled_error)?, + }; - convert_to_api_response( - chain, - epoch, - legacy_dependent_root, - execution_optimistic, - indices, - ) + convert_to_api_response(chain, epoch, selected_root, execution_optimistic, indices) } /// Converts the internal representation of proposer duties into one that is compatible with the diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index bbf92a4dda..43dfbeb836 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -2,7 +2,7 @@ use crate::metrics; use std::future::Future; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::{AsBlock, LookupBlock}; use beacon_chain::data_column_verification::GossipVerifiedDataColumn; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ @@ -311,19 +311,11 @@ pub async fn publish_block>( slot = %block.slot(), "Block previously seen" ); - let Ok(rpc_block) = RpcBlock::new( - block.clone(), - None, - &chain.data_availability_checker, - chain.spec.clone(), - ) else { - return Err(warp_utils::reject::custom_bad_request( - "Unable to construct rpc block".to_string(), - )); - }; + // try 
to reprocess as a lookup (single) block and let sync take care of missing components + let lookup_block = LookupBlock::new(block.clone()); let import_result = Box::pin(chain.process_block( block_root, - rpc_block, + lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::HttpApi, publish_fn, diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs index a9082df715..3d96b85870 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -6,7 +6,7 @@ use crate::utils::{ AnyVersionFilter, ChainFilter, EthV1Filter, NetworkTxFilter, NotWhileSyncingFilter, ResponseFilter, TaskSpawnerFilter, ValidatorSubscriptionTxFilter, publish_network_message, }; -use crate::version::V3; +use crate::version::{V1, V2, V3, unsupported_version_rejection}; use crate::{StateId, attester_duties, proposer_duties, sync_committees}; use beacon_chain::attestation_verification::VerifiedAttestation; use beacon_chain::validator_monitor::timestamp_now; @@ -971,12 +971,12 @@ pub fn post_validator_aggregate_and_proofs( // GET validator/duties/proposer/{epoch} pub fn get_validator_duties_proposer( - eth_v1: EthV1Filter, + any_version: AnyVersionFilter, chain_filter: ChainFilter, not_while_syncing_filter: NotWhileSyncingFilter, task_spawner_filter: TaskSpawnerFilter, ) -> ResponseFilter { - eth_v1 + any_version .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("proposer")) @@ -990,13 +990,20 @@ pub fn get_validator_duties_proposer( .and(task_spawner_filter) .and(chain_filter) .then( - |epoch: Epoch, + |endpoint_version: EndpointVersion, + epoch: Epoch, not_synced_filter: Result<(), Rejection>, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { not_synced_filter?; - proposer_duties::proposer_duties(epoch, &chain) + if endpoint_version == V1 { + proposer_duties::proposer_duties(epoch, &chain) + } else if endpoint_version == V2 { + 
proposer_duties::proposer_duties_v2(epoch, &chain) + } else { + Err(unsupported_version_rejection(endpoint_version)) + } }) }, ) diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index a18dd10464..e0e4029875 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1053,6 +1053,240 @@ async fn proposer_duties_with_gossip_tolerance() { ); } +// Test that a request for next epoch v2 proposer duties succeeds when the current slot clock is +// within gossip clock disparity (500ms) of the new epoch. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn proposer_duties_v2_with_gossip_tolerance() { + let validator_count = 24; + + let tester = InteractiveTester::::new(None, validator_count).await; + let harness = &tester.harness; + let spec = &harness.spec; + let client = &tester.client; + + let num_initial = 4 * E::slots_per_epoch() - 1; + let next_epoch_start_slot = Slot::new(num_initial + 1); + + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await; + + assert_eq!(harness.chain.slot().unwrap(), num_initial); + + // Set the clock to just before the next epoch. + harness.chain.slot_clock.advance_time( + Duration::from_secs(spec.seconds_per_slot) - spec.maximum_gossip_clock_disparity(), + ); + assert_eq!( + harness + .chain + .slot_clock + .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) + .unwrap(), + next_epoch_start_slot + ); + + let head_state = harness.get_current_state(); + let head_block_root = harness.head_block_root(); + let tolerant_current_epoch = next_epoch_start_slot.epoch(E::slots_per_epoch()); + + // Prime the proposer shuffling cache with an incorrect entry (regression test). 
+ let wrong_decision_root = head_state + .proposer_shuffling_decision_root(head_block_root, spec) + .unwrap(); + let wrong_proposer_indices = vec![0; E::slots_per_epoch() as usize]; + harness + .chain + .beacon_proposer_cache + .lock() + .insert( + tolerant_current_epoch, + wrong_decision_root, + wrong_proposer_indices.clone(), + head_state.fork(), + ) + .unwrap(); + + // Request the v2 proposer duties. + let proposer_duties_tolerant_current_epoch = client + .get_validator_duties_proposer_v2(tolerant_current_epoch) + .await + .unwrap(); + + assert_eq!( + proposer_duties_tolerant_current_epoch.dependent_root, + head_state + .proposer_shuffling_decision_root_at_epoch( + tolerant_current_epoch, + head_block_root, + spec, + ) + .unwrap() + ); + assert_ne!( + proposer_duties_tolerant_current_epoch + .data + .iter() + .map(|data| data.validator_index as usize) + .collect::>(), + wrong_proposer_indices, + ); + + // We should get the exact same result after properly advancing into the epoch. + harness + .chain + .slot_clock + .advance_time(spec.maximum_gossip_clock_disparity()); + assert_eq!(harness.chain.slot().unwrap(), next_epoch_start_slot); + let proposer_duties_current_epoch = client + .get_validator_duties_proposer_v2(tolerant_current_epoch) + .await + .unwrap(); + + assert_eq!( + proposer_duties_tolerant_current_epoch, + proposer_duties_current_epoch + ); +} + +// Test that post-Fulu, v1 and v2 proposer duties return different dependent roots. +// Post-Fulu, the true dependent root shifts to the block root at the end of epoch N-2 (due to +// `min_seed_lookahead`), while the legacy v1 root remains at the end of epoch N-1. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn proposer_duties_v2_post_fulu_dependent_root() { + type E = MinimalEthSpec; + let spec = test_spec::(); + + if !spec.is_fulu_scheduled() { + return; + } + + let validator_count = 24; + let slots_per_epoch = E::slots_per_epoch(); + + let tester = InteractiveTester::::new(Some(spec.clone()), validator_count).await; + let harness = &tester.harness; + let client = &tester.client; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + mock_el.server.all_payloads_valid(); + + // Build 3 full epochs of chain so we're in epoch 3. + let num_slots = 3 * slots_per_epoch; + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_slots as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::AllValidators, + LightClientStrategy::Disabled, + ) + .await; + + let current_epoch = harness.chain.epoch().unwrap(); + assert_eq!(current_epoch, Epoch::new(3)); + + // For epoch 3 with min_seed_lookahead=1: + // Post-Fulu decision slot: end of epoch N-2 = end of epoch 1 = slot 15 + // Legacy decision slot: end of epoch N-1 = end of epoch 2 = slot 23 + let true_decision_slot = Epoch::new(1).end_slot(slots_per_epoch); + let legacy_decision_slot = Epoch::new(2).end_slot(slots_per_epoch); + assert_eq!(true_decision_slot, Slot::new(15)); + assert_eq!(legacy_decision_slot, Slot::new(23)); + + // Fetch the block roots at these slots to compute expected dependent roots. + let expected_v2_root = harness + .chain + .block_root_at_slot(true_decision_slot, beacon_chain::WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let expected_v1_root = harness + .chain + .block_root_at_slot(legacy_decision_slot, beacon_chain::WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + + // Sanity check: the two roots should be different since they refer to different blocks. 
+ assert_ne!( + expected_v1_root, expected_v2_root, + "legacy and true decision roots should differ post-Fulu" + ); + + // Query v1 and v2 proposer duties for the current epoch. + let v1_result = client + .get_validator_duties_proposer(current_epoch) + .await + .unwrap(); + let v2_result = client + .get_validator_duties_proposer_v2(current_epoch) + .await + .unwrap(); + + // The proposer assignments (data) must be identical. + assert_eq!(v1_result.data, v2_result.data); + + // The dependent roots must differ. + assert_ne!( + v1_result.dependent_root, v2_result.dependent_root, + "v1 and v2 dependent roots should differ post-Fulu" + ); + + // Verify each root matches the expected value. + assert_eq!( + v1_result.dependent_root, expected_v1_root, + "v1 dependent root should be block root at end of epoch N-1" + ); + assert_eq!( + v2_result.dependent_root, expected_v2_root, + "v2 dependent root should be block root at end of epoch N-2" + ); + + // Also verify the next-epoch path (epoch 4). + let next_epoch = current_epoch + 1; + let v1_next = client + .get_validator_duties_proposer(next_epoch) + .await + .unwrap(); + let v2_next = client + .get_validator_duties_proposer_v2(next_epoch) + .await + .unwrap(); + + assert_eq!(v1_next.data, v2_next.data); + assert_ne!( + v1_next.dependent_root, v2_next.dependent_root, + "v1 and v2 next-epoch dependent roots should differ post-Fulu" + ); + + // For epoch 4: true decision is end of epoch 2 (slot 23), legacy is end of epoch 3 (slot 31). 
+ let expected_v2_next_root = harness + .chain + .block_root_at_slot( + Epoch::new(2).end_slot(slots_per_epoch), + beacon_chain::WhenSlotSkipped::Prev, + ) + .unwrap() + .unwrap(); + let expected_v1_next_root = harness + .chain + .block_root_at_slot( + Epoch::new(3).end_slot(slots_per_epoch), + beacon_chain::WhenSlotSkipped::Prev, + ) + .unwrap() + .unwrap_or(harness.head_block_root()); + assert_eq!(v1_next.dependent_root, expected_v1_next_root); + assert_eq!(v2_next.dependent_root, expected_v2_next_root); + assert_ne!(expected_v2_next_root, harness.head_block_root()); +} + // Test that a request to `lighthouse/custody/backfill` succeeds by verifying that `CustodyContext` and `DataColumnCustodyInfo` // have been updated with the correct values. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 6696e109a5..c9086dd876 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -37,7 +37,7 @@ use proto_array::ExecutionStatus; use reqwest::{RequestBuilder, Response, StatusCode}; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; -use ssz::BitList; +use ssz::{BitList, Decode}; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use state_processing::state_advance::partial_state_advance; @@ -1409,6 +1409,73 @@ impl ApiTester { self } + pub async fn test_beacon_states_proposer_lookahead(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = match self + .client + .get_beacon_states_proposer_lookahead(state_id.0) + .await + { + Ok(response) => response, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result 
should be none"); + let expected = state.proposer_lookahead().unwrap().to_vec(); + + let response = result.unwrap(); + // Compare Vec directly, not Vec + assert_eq!(response.data().0, expected); + + // Check that the version header is returned in the response + let fork_name = state.fork_name(&self.chain.spec).unwrap(); + assert_eq!(response.version(), Some(fork_name),); + } + + self + } + + pub async fn test_beacon_states_proposer_lookahead_ssz(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = match self + .client + .get_beacon_states_proposer_lookahead_ssz(state_id.0) + .await + { + Ok(response) => response, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let expected = state.proposer_lookahead().unwrap(); + + let ssz_bytes = result.unwrap(); + let decoded = Vec::::from_ssz_bytes(&ssz_bytes) + .expect("should decode SSZ proposer lookahead"); + assert_eq!(decoded, expected.to_vec()); + } + + self + } + pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -3392,6 +3459,80 @@ impl ApiTester { self } + pub async fn test_get_validator_duties_proposer_v2(self) -> Self { + let current_epoch = self.chain.epoch().unwrap(); + + for epoch in 0..=current_epoch.as_u64() + 1 { + let epoch = Epoch::from(epoch); + + // Compute the true dependent root using the spec's decision slot. 
+ let decision_slot = self.chain.spec.proposer_shuffling_decision_slot::(epoch); + let dependent_root = self + .chain + .block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap_or(self.chain.head_beacon_block_root()); + + let result = self + .client + .get_validator_duties_proposer_v2(epoch) + .await + .unwrap(); + + let mut state = self + .chain + .state_at_slot( + epoch.start_slot(E::slots_per_epoch()), + StateSkipConfig::WithStateRoots, + ) + .unwrap(); + + state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + let expected_duties = epoch + .slot_iter(E::slots_per_epoch()) + .map(|slot| { + let index = state + .get_beacon_proposer_index(slot, &self.chain.spec) + .unwrap(); + let pubkey = state.validators().get(index).unwrap().pubkey; + + ProposerData { + pubkey, + validator_index: index as u64, + slot, + } + }) + .collect::>(); + + let expected = DutiesResponse { + data: expected_duties, + execution_optimistic: Some(false), + dependent_root, + }; + + assert_eq!(result, expected); + + // v1 and v2 should return the same data. + let v1_result = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap(); + assert_eq!(result.data, v1_result.data); + } + + // Requests to the epochs after the next epoch should fail. 
+ self.client + .get_validator_duties_proposer_v2(current_epoch + 2) + .await + .unwrap_err(); + + self + } + pub async fn test_get_validator_duties_early(self) -> Self { let current_epoch = self.chain.epoch().unwrap(); let next_epoch = current_epoch + 1; @@ -7054,15 +7195,16 @@ impl ApiTester { assert_eq!(result.execution_optimistic, Some(true)); } - async fn test_get_beacon_rewards_blocks_at_head(&self) -> StandardBlockReward { + async fn test_get_beacon_rewards_blocks_at_head( + &self, + ) -> ExecutionOptimisticFinalizedResponse { self.client .get_beacon_rewards_blocks(CoreBlockId::Head) .await .unwrap() - .data } - async fn test_beacon_block_rewards_electra(self) -> Self { + async fn test_beacon_block_rewards_fulu(self) -> Self { for _ in 0..E::slots_per_epoch() { let state = self.harness.get_current_state(); let slot = state.slot() + Slot::new(1); @@ -7076,8 +7218,80 @@ impl ApiTester { .compute_beacon_block_reward(signed_block.message(), &mut state) .unwrap(); self.harness.extend_slots(1).await; - let api_beacon_block_reward = self.test_get_beacon_rewards_blocks_at_head().await; - assert_eq!(beacon_block_reward, api_beacon_block_reward); + let response = self.test_get_beacon_rewards_blocks_at_head().await; + assert_eq!(response.execution_optimistic, Some(false)); + assert_eq!(response.finalized, Some(false)); + assert_eq!(beacon_block_reward, response.data); + } + self + } + + async fn test_get_beacon_rewards_sync_committee_at_head( + &self, + ) -> ExecutionOptimisticFinalizedResponse> { + self.client + .post_beacon_rewards_sync_committee(CoreBlockId::Head, &[]) + .await + .unwrap() + } + + async fn test_beacon_sync_committee_rewards_fulu(self) -> Self { + for _ in 0..E::slots_per_epoch() { + let state = self.harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + let ((signed_block, _maybe_blob_sidecars), mut state) = + self.harness.make_block_return_pre_state(state, slot).await; + + let mut expected_rewards = self + .harness + .chain + 
.compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + expected_rewards.sort_by_key(|r| r.validator_index); + + self.harness.extend_slots(1).await; + + let response = self.test_get_beacon_rewards_sync_committee_at_head().await; + assert_eq!(response.execution_optimistic, Some(false)); + assert_eq!(response.finalized, Some(false)); + let mut api_rewards = response.data; + api_rewards.sort_by_key(|r| r.validator_index); + assert_eq!(expected_rewards, api_rewards); + } + self + } + + async fn test_get_beacon_rewards_attestations( + &self, + epoch: Epoch, + ) -> ExecutionOptimisticFinalizedResponse { + self.client + .post_beacon_rewards_attestations(epoch, &[]) + .await + .unwrap() + } + + async fn test_beacon_attestation_rewards_fulu(self) -> Self { + // Check 3 epochs. + let num_epochs = 3; + for _ in 0..num_epochs { + self.harness + .extend_slots(E::slots_per_epoch() as usize) + .await; + + let epoch = self.chain.epoch().unwrap() - 1; + + let expected_rewards = self + .harness + .chain + .compute_attestation_rewards(epoch, vec![]) + .unwrap(); + + let response = self.test_get_beacon_rewards_attestations(epoch).await; + assert_eq!(response.execution_optimistic, Some(false)); + assert_eq!(response.finalized, Some(false)); + assert_eq!(expected_rewards, response.data); } self } @@ -7286,6 +7500,23 @@ async fn beacon_get_state_info_electra() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_state_info_fulu() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_beacon_states_proposer_lookahead() + .await + 
.test_beacon_states_proposer_lookahead_ssz() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_get_blocks() { ApiTester::new() @@ -7617,6 +7848,31 @@ async fn get_validator_duties_proposer_with_skip_slots() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_duties_proposer_v2() { + ApiTester::new_from_config(ApiTesterConfig { + spec: test_spec::(), + retain_historic_states: true, + ..ApiTesterConfig::default() + }) + .await + .test_get_validator_duties_proposer_v2() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_duties_proposer_v2_with_skip_slots() { + ApiTester::new_from_config(ApiTesterConfig { + spec: test_spec::(), + retain_historic_states: true, + ..ApiTesterConfig::default() + }) + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_duties_proposer_v2() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn block_production() { ApiTester::new().await.test_block_production().await; @@ -8351,16 +8607,47 @@ async fn expected_withdrawals_valid_capella() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn get_beacon_rewards_blocks_electra() { +async fn get_beacon_rewards_blocks_fulu() { let mut config = ApiTesterConfig::default(); config.spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); config.spec.capella_fork_epoch = Some(Epoch::new(0)); config.spec.deneb_fork_epoch = Some(Epoch::new(0)); config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); ApiTester::new_from_config(config) .await - .test_beacon_block_rewards_electra() + .test_beacon_block_rewards_fulu() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_beacon_rewards_sync_committee_fulu() { + let mut config = ApiTesterConfig::default(); + 
config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_beacon_sync_committee_rewards_fulu() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_beacon_rewards_attestations_fulu() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_beacon_attestation_rewards_fulu() .await; } diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 10156a9ff5..346e350825 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -562,7 +562,7 @@ fn handle_rpc_request( RequestType::PayloadEnvelopesByRoot(PayloadEnvelopesByRootRequest { beacon_block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, - spec.max_request_blocks(current_fork), + spec.max_request_payloads(), )?, }), )), diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 9861119ac1..336747fb83 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -954,6 +954,35 @@ where return; } } + RequestType::PayloadEnvelopesByRange(request) => { + let max_allowed = spec.max_request_payloads; + if request.count > max_allowed { + 
self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: Protocol::PayloadEnvelopesByRange, + error: RPCError::InvalidData(format!( + "requested exceeded limit. allowed: {}, requested: {}", + max_allowed, request.count + )), + })); + return; + } + } + RequestType::DataColumnsByRange(request) => { + let max_requested = request.max_requested::(); + let max_allowed = spec.max_request_data_column_sidecars; + if max_requested > max_allowed { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: Protocol::DataColumnsByRange, + error: RPCError::InvalidData(format!( + "requested exceeded limit. allowed: {}, requested: {}", + max_allowed, max_requested + )), + })); + return; + } + } _ => {} }; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 14bf2415c4..2c92e17c44 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -17,10 +17,10 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockGloas, BlobSidecar, ChainSpec, - DataColumnSidecarFulu, DataColumnSidecarGloas, EmptyBlock, Epoch, EthSpec, EthSpecId, - ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, - LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecarFulu, + DataColumnSidecarGloas, EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, + LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, + LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, SignedBeaconBlock, SignedExecutionPayloadEnvelope, }; @@ -65,17 
+65,6 @@ pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock = + types::ExecutionPayload::::max_execution_payload_bellatrix_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET); // Adding the additional ssz offset for the `ExecutionPayload` field -/// Gloas blocks no longer contain an execution payload (it's in the envelope), -/// so they are significantly smaller than Bellatrix+ blocks. -pub static SIGNED_BEACON_BLOCK_GLOAS_MAX: LazyLock = LazyLock::new(|| { - SignedBeaconBlock::::from_block( - BeaconBlock::Gloas(BeaconBlockGloas::full(&MainnetEthSpec::default_spec())), - Signature::empty(), - ) - .as_ssz_bytes() - .len() -}); - pub static SIGNED_EXECUTION_PAYLOAD_ENVELOPE_MIN: LazyLock = LazyLock::new(SignedExecutionPayloadEnvelope::::min_size); @@ -157,18 +146,18 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { ), // After the merge the max SSZ size of a block is absurdly big. The size is actually // bound by other constants, so here we default to the bellatrix's max value + // After the merge the max SSZ size includes the execution payload. + // Gloas blocks no longer contain the execution payload, but we must + // still accept pre-Gloas blocks during historical sync, so we keep the + // Bellatrix max as the upper bound. 
ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra - | ForkName::Fulu => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks - *SIGNED_BEACON_BLOCK_BELLATRIX_MAX, // Bellatrix block is larger than base and altair blocks - ), - // Gloas blocks no longer contain the execution payload, so they are much smaller - ForkName::Gloas => RpcLimits::new( + | ForkName::Fulu + | ForkName::Gloas => RpcLimits::new( *SIGNED_BEACON_BLOCK_BASE_MIN, - *SIGNED_BEACON_BLOCK_GLOAS_MAX, + *SIGNED_BEACON_BLOCK_BELLATRIX_MAX, ), } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index b0fd9f4dd5..ebdca386d8 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -110,9 +110,9 @@ pub struct RPCRateLimiter { /// BlobsByRoot rate limiter. blbroot_rl: Limiter, /// PayloadEnvelopesByRange rate limiter. - perange_rl: Limiter, + envrange_rl: Limiter, /// PayloadEnvelopesByRoot rate limiter. - peroots_rl: Limiter, + envroots_rl: Limiter, /// DataColumnsByRoot rate limiter. dcbroot_rl: Limiter, /// DataColumnsByRange rate limiter. 
@@ -252,8 +252,8 @@ impl RPCRateLimiterBuilder { let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; - let perange_rl = Limiter::from_quota(perange_quota)?; - let peroots_rl = Limiter::from_quota(peroots_quota)?; + let envrange_rl = Limiter::from_quota(perange_quota)?; + let envroots_rl = Limiter::from_quota(peroots_quota)?; let blbrange_rl = Limiter::from_quota(blbrange_quota)?; let blbroot_rl = Limiter::from_quota(blbroots_quota)?; let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; @@ -277,8 +277,8 @@ impl RPCRateLimiterBuilder { goodbye_rl, bbroots_rl, bbrange_rl, - perange_rl, - peroots_rl, + envrange_rl, + envroots_rl, blbrange_rl, blbroot_rl, dcbroot_rl, @@ -406,8 +406,8 @@ impl RPCRateLimiter { Protocol::Goodbye => &mut self.goodbye_rl, Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, - Protocol::PayloadEnvelopesByRange => &mut self.perange_rl, - Protocol::PayloadEnvelopesByRoot => &mut self.peroots_rl, + Protocol::PayloadEnvelopesByRange => &mut self.envrange_rl, + Protocol::PayloadEnvelopesByRoot => &mut self.envroots_rl, Protocol::BlobsByRange => &mut self.blbrange_rl, Protocol::BlobsByRoot => &mut self.blbroot_rl, Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, @@ -432,8 +432,8 @@ impl RPCRateLimiter { status_rl, bbrange_rl, bbroots_rl, - perange_rl, - peroots_rl, + envrange_rl, + envroots_rl, blbrange_rl, blbroot_rl, dcbroot_rl, @@ -451,8 +451,8 @@ impl RPCRateLimiter { status_rl.prune(time_since_start); bbrange_rl.prune(time_since_start); bbroots_rl.prune(time_since_start); - perange_rl.prune(time_since_start); - peroots_rl.prune(time_since_start); + envrange_rl.prune(time_since_start); + envroots_rl.prune(time_since_start); blbrange_rl.prune(time_since_start); blbroot_rl.prune(time_since_start); dcbrange_rl.prune(time_since_start); diff --git 
a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 6c503d3e10..56fcbb3bb6 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -187,10 +187,9 @@ impl Network { // set up a collection of variables accessible outside of the network crate // Create an ENR or load from disk if appropriate - let next_fork_digest = ctx - .fork_context - .next_fork_digest() - .unwrap_or_else(|| ctx.fork_context.current_fork_digest()); + // Per [spec](https://github.com/ethereum/consensus-specs/blob/1baa05e71148b0975e28918ac6022d2256b56f4a/specs/fulu/p2p-interface.md?plain=1#L636-L637) + // `nfd` must be zero-valued when no next fork is scheduled. + let next_fork_digest = ctx.fork_context.next_fork_digest().unwrap_or_default(); let advertised_cgc = config .advertise_false_custody_group_count diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 0fda9760b7..5e49c7b925 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -20,7 +20,9 @@ use beacon_chain::{ }; use beacon_chain::{ blob_verification::{GossipBlobError, GossipVerifiedBlob}, - payload_envelope_verification::gossip_verified_envelope::GossipVerifiedEnvelope, + payload_envelope_verification::{ + EnvelopeError, gossip_verified_envelope::GossipVerifiedEnvelope, + }, }; use beacon_processor::{Work, WorkEvent}; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; @@ -3275,6 +3277,14 @@ impl NetworkBeaconProcessor { } } + #[allow(clippy::too_many_arguments)] + #[instrument( + name = "lh_process_execution_payload_envelope", + parent = None, + level = "debug", + skip_all, + fields(beacon_block_root = tracing::field::Empty), + )] pub async fn 
process_gossip_execution_payload_envelope( self: Arc, message_id: MessageId, @@ -3314,69 +3324,6 @@ impl NetworkBeaconProcessor { envelope: Arc>, seen_duration: Duration, ) -> Option> { - let beacon_block_root = envelope.message.beacon_block_root; - let envelope_slot = envelope.slot(); - - // Check if the envelope's block is known to fork choice before attempting full - // verification. If the block isn't known yet, defer the envelope to the reprocess - // queue so it can be processed once the block is imported. - let block_known = self - .chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&beacon_block_root); - - if !block_known { - debug!( - ?beacon_block_root, - %envelope_slot, - "Envelope references unknown block, deferring" - ); - - let inner_self = self.clone(); - let chain = self.chain.clone(); - let process_fn = Box::pin(async move { - match chain.verify_envelope_for_gossip(envelope).await { - Ok(verified_envelope) => { - inner_self - .process_gossip_verified_execution_payload_envelope( - peer_id, - verified_envelope, - ) - .await; - } - Err(e) => { - debug!( - error = ?e, - "Deferred envelope failed verification" - ); - } - } - }); - - if self - .beacon_processor_send - .try_send(WorkEvent { - drop_during_sync: false, - work: Work::Reprocess(ReprocessQueueMessage::UnknownBlockEnvelope( - QueuedGossipEnvelope { - beacon_block_slot: envelope_slot, - beacon_block_root, - process_fn, - }, - )), - }) - .is_err() - { - error!( - %envelope_slot, - ?beacon_block_root, - "Failed to defer envelope import" - ); - } - return None; - } - let envelope_delay = get_slot_delay_ms(seen_duration, envelope.slot(), &self.chain.slot_clock); @@ -3411,17 +3358,70 @@ impl NetworkBeaconProcessor { verified_envelope } + + Err(EnvelopeError::BlockRootUnknown { block_root }) => { + let envelope_slot = envelope.slot(); + + debug!( + ?block_root, + %envelope_slot, + "Envelope references unknown block, deferring to reprocess queue" + ); + + 
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + + let inner_self = self.clone(); + let chain = self.chain.clone(); + let process_fn = Box::pin(async move { + match chain.verify_envelope_for_gossip(envelope).await { + Ok(verified_envelope) => { + inner_self + .process_gossip_verified_execution_payload_envelope( + peer_id, + verified_envelope, + ) + .await; + } + Err(e) => { + debug!( + error = ?e, + "Deferred envelope failed verification" + ); + } + } + }); + + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(ReprocessQueueMessage::UnknownBlockForEnvelope( + QueuedGossipEnvelope { + beacon_block_slot: envelope_slot, + beacon_block_root: block_root, + process_fn, + }, + )), + }) + .is_err() + { + error!( + %envelope_slot, + ?block_root, + "Failed to defer envelope import" + ); + } + return None; + } // TODO(gloas) penalize peers accordingly Err(_) => return None, }; - // TODO(gloas) do we need to register the payload with monitored validators? - let envelope_slot = verified_envelope.signed_envelope.slot(); let beacon_block_root = verified_envelope.signed_envelope.beacon_block_root(); match self.chain.slot() { - // We only need to do a simple check about the envelope slot vs the current slot beacuse - // `verify_envelope_for_gossip` already ensuresthat the envelope slot is within tolerance + // We only need to do a simple check about the envelope slot vs the current slot because + // `verify_envelope_for_gossip` already ensures that the envelope slot is within tolerance // for envelope imports. 
Ok(current_slot) if envelope_slot > current_slot => { warn!( @@ -3462,12 +3462,13 @@ impl NetworkBeaconProcessor { async fn process_gossip_verified_execution_payload_envelope( self: Arc, - peer_id: PeerId, + _peer_id: PeerId, verified_envelope: GossipVerifiedEnvelope, ) { let _processing_start_time = Instant::now(); let beacon_block_root = verified_envelope.signed_envelope.beacon_block_root(); + #[allow(clippy::result_large_err)] let result = self .chain .process_execution_payload_envelope( @@ -3483,31 +3484,12 @@ impl NetworkBeaconProcessor { // register_process_result_metrics(&result, metrics::BlockSource::Gossip, "envelope"); match &result { - Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - // TODO(gloas) do we need to send a `PayloadImported` event to the reporcess queue? - debug!( - ?block_root, - %peer_id, - "Gossipsub envelope processed" - ); - - // TODO(gloas) do we need to recompute head? - // should canonical_head return the block and the payload now? - self.chain.recompute_head_at_current_slot().await; - - // TODO(gloas) metrics + Ok(AvailabilityProcessingStatus::Imported(_)) + | Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { + // Nothing to do } - Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { - trace!( - %slot, - %block_root, - "Processed envelope, waiting for other components" - ) - } - Err(_) => { // TODO(gloas) implement peer penalties - warn!("process_gossip_verified_execution_payload_envelope_failed") } } } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 8f45097408..ca5710076b 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,7 +1,8 @@ use crate::sync::manager::BlockProcessType; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::blob_verification::{GossipBlobError, observe_gossip_blob}; 
-use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, observe_gossip_data_column}; use beacon_chain::fetch_blobs::{ EngineGetBlobsOutput, FetchEngineBlobError, fetch_and_process_engine_blobs, @@ -518,14 +519,14 @@ impl NetworkBeaconProcessor { /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. - pub fn send_rpc_beacon_block( + pub fn send_lookup_beacon_block( self: &Arc, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> Result<(), Error> { - let process_fn = self.clone().generate_rpc_beacon_block_process_fn( + let process_fn = self.clone().generate_lookup_beacon_block_process_fn( block_root, block, seen_timestamp, @@ -627,7 +628,7 @@ impl NetworkBeaconProcessor { pub fn send_chain_segment( self: &Arc, process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>, ) -> Result<(), Error> { debug!(blocks = blocks.len(), id = ?process_id, "Batch sending for process"); let processor = self.clone(); diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 88d3697c9d..8b31b67acb 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -3,6 +3,7 @@ use crate::network_beacon_processor::{FUTURE_SLOT_TOLERANCE, NetworkBeaconProces use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; +use beacon_chain::payload_envelope_streamer::EnvelopeRequestSource; use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessStatus, WhenSlotSkipped}; use itertools::{Itertools, process_results}; use 
lighthouse_network::rpc::methods::{ @@ -16,7 +17,7 @@ use slot_clock::SlotClock; use std::collections::{HashMap, HashSet, hash_map::Entry}; use std::sync::Arc; use tokio_stream::StreamExt; -use tracing::{Span, debug, error, field, instrument, warn}; +use tracing::{Span, debug, error, field, instrument, trace, warn}; use types::data::BlobIdentifier; use types::{ColumnIndex, Epoch, EthSpec, Hash256, Slot}; @@ -303,19 +304,10 @@ impl NetworkBeaconProcessor { }; let requested_envelopes = request.beacon_block_roots.len(); - let mut envelope_stream = match self - .chain - .get_payload_envelopes_checking_caches(request.beacon_block_roots.to_vec()) - { - Ok(envelope_stream) => envelope_stream, - Err(e) => { - error!( error = ?e, "Error getting payload envelope stream"); - return Err(( - RpcErrorResponse::ServerError, - "Error getting payload envelope stream", - )); - } - }; + let mut envelope_stream = self.chain.get_payload_envelopes( + request.beacon_block_roots.to_vec(), + EnvelopeRequestSource::ByRoot, + ); // Fetching payload envelopes is async because it may have to hit the execution layer for payloads. let mut send_envelope_count = 0; while let Some((root, result)) = envelope_stream.next().await { @@ -1135,6 +1127,19 @@ impl NetworkBeaconProcessor { "Received ExecutionPayloadEnvelopesByRange Request" ); + let request_start_slot = Slot::from(req_start_slot); + let fork_name = self + .chain + .spec + .fork_name_at_slot::(request_start_slot); + + if !fork_name.gloas_enabled() { + return Err(( + RpcErrorResponse::InvalidRequest, + "Requested envelopes for pre-gloas slots", + )); + } + // Spawn a blocking handle since get_block_roots_for_slot_range takes a sync lock on the // fork-choice. 
let network_beacon_processor = self.clone(); @@ -1185,13 +1190,9 @@ impl NetworkBeaconProcessor { } }; - let mut envelope_stream = match self.chain.get_payload_envelopes(block_roots) { - Ok(envelope_stream) => envelope_stream, - Err(e) => { - error!(error = ?e, "Error getting payload envelope stream"); - return Err((RpcErrorResponse::ServerError, "Iterator error")); - } - }; + let mut envelope_stream = self + .chain + .get_payload_envelopes(block_roots, EnvelopeRequestSource::ByRange); // Fetching payload envelopes is async because it may have to hit the execution layer for payloads. let mut envelopes_sent = 0; @@ -1201,7 +1202,7 @@ impl NetworkBeaconProcessor { // Due to skip slots, blocks could be out of the range, we ensure they // are in the range before sending if envelope.slot() >= req_start_slot - && envelope.slot() < req_start_slot + req.count + && envelope.slot() < req_start_slot.saturating_add(req.count) { envelopes_sent += 1; self.send_network_message(NetworkMessage::SendResponse { @@ -1212,14 +1213,12 @@ impl NetworkBeaconProcessor { } } Ok(None) => { - error!( + trace!( request = ?req, %peer_id, request_root = ?root, - "Envelope in the chain is not in the store" + "No envelope for block root" ); - log_results(peer_id, envelopes_sent); - return Err((RpcErrorResponse::ServerError, "Database inconsistency")); } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { debug!( diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index a90ad0b750..b4586994e4 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -6,7 +6,8 @@ use crate::sync::{ ChainId, manager::{BlockProcessType, BlockProcessingResult, SyncMessage}, }; -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::LookupBlock; +use 
beacon_chain::block_verification_types::{AsBlock, RangeSyncBlock}; use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::historical_data_columns::HistoricalDataColumnError; use beacon_chain::{ @@ -53,16 +54,16 @@ impl NetworkBeaconProcessor { /// /// This separate function was required to prevent a cycle during compiler /// type checking. - pub fn generate_rpc_beacon_block_process_fn( + pub fn generate_lookup_beacon_block_process_fn( self: Arc, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> AsyncFn { let process_fn = async move { let duplicate_cache = self.duplicate_cache.clone(); - self.process_rpc_block( + self.process_lookup_block( block_root, block, seen_timestamp, @@ -149,15 +150,15 @@ impl NetworkBeaconProcessor { } /// Returns the `process_fn` and `ignore_fn` required when requeuing an RPC block. - pub fn generate_rpc_beacon_block_fns( + pub fn generate_lookup_beacon_block_fns( self: Arc, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> (AsyncFn, BlockingFn) { // An async closure which will import the block. 
- let process_fn = self.clone().generate_rpc_beacon_block_process_fn( + let process_fn = self.clone().generate_lookup_beacon_block_process_fn( block_root, block, seen_timestamp, @@ -183,10 +184,10 @@ impl NetworkBeaconProcessor { skip_all, fields(?block_root), )] - pub async fn process_rpc_block( + pub async fn process_lookup_block( self: Arc>, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, duplicate_cache: DuplicateCache, @@ -194,14 +195,14 @@ impl NetworkBeaconProcessor { // Check if the block is already being imported through another source let Some(handle) = duplicate_cache.check_and_insert(block_root) else { debug!( - action = "sending rpc block to reprocessing queue", + action = "sending lookup block to reprocessing queue", %block_root, ?process_type, "Gossip block is being processed" ); // Send message to work reprocess queue to retry the block - let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( + let (process_fn, ignore_fn) = self.clone().generate_lookup_beacon_block_fns( block_root, block, seen_timestamp, @@ -236,7 +237,7 @@ impl NetworkBeaconProcessor { slot = %block.slot(), commitments_formatted, ?process_type, - "Processing RPC block" + "Processing Lookup block" ); let signed_beacon_block = block.block_cloned(); @@ -606,7 +607,7 @@ impl NetworkBeaconProcessor { pub async fn process_chain_segment( &self, process_id: ChainSegmentProcessId, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, ) { let ChainSegmentProcessId::RangeBatchId(chain_id, epoch) = process_id else { // This is a request from range sync, this should _never_ happen @@ -687,7 +688,7 @@ impl NetworkBeaconProcessor { pub fn process_chain_segment_backfill( &self, process_id: ChainSegmentProcessId, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, ) { let ChainSegmentProcessId::BackSyncBatchId(epoch) = process_id else { // this a request from RangeSync, this should _never_ happen @@ -758,7 
+759,7 @@ impl NetworkBeaconProcessor { #[instrument(skip_all)] async fn process_blocks<'a>( &self, - downloaded_blocks: impl Iterator>, + downloaded_blocks: impl Iterator>, notify_execution_layer: NotifyExecutionLayer, ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec<_> = downloaded_blocks.cloned().collect(); @@ -792,23 +793,13 @@ impl NetworkBeaconProcessor { #[instrument(skip_all)] fn process_backfill_blocks( &self, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, ) -> (usize, Result<(), ChainSegmentFailed>) { let total_blocks = downloaded_blocks.len(); - let mut available_blocks = vec![]; - - for downloaded_block in downloaded_blocks { - match downloaded_block { - RpcBlock::FullyAvailable(available_block) => available_blocks.push(available_block), - RpcBlock::BlockOnly { .. } => return ( - 0, - Err(ChainSegmentFailed { - peer_action: None, - message: "Invalid downloaded_blocks segment. All downloaded blocks must be fully available".to_string() - }) - ), - } - } + let available_blocks = downloaded_blocks + .into_iter() + .map(|block| block.into_available_block()) + .collect::>(); match self .chain diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 2e4b0fbd2a..d0f0557223 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -8,7 +8,7 @@ use crate::{ service::NetworkMessage, sync::{SyncMessage, manager::BlockProcessType}, }; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::data_column_verification::validate_data_column_sidecar_for_gossip_fulu; use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; @@ -441,36 +441,24 @@ impl TestRig { } } - pub fn enqueue_rpc_block(&self) { + pub fn enqueue_lookup_block(&self) { let block_root = 
self.next_block.canonical_root(); self.network_beacon_processor - .send_rpc_beacon_block( + .send_lookup_beacon_block( block_root, - RpcBlock::new( - self.next_block.clone(), - None, - &self._harness.chain.data_availability_checker, - self._harness.spec.clone(), - ) - .unwrap(), + LookupBlock::new(self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 0 }, ) .unwrap(); } - pub fn enqueue_single_lookup_rpc_block(&self) { + pub fn enqueue_single_lookup_block(&self) { let block_root = self.next_block.canonical_root(); self.network_beacon_processor - .send_rpc_beacon_block( + .send_lookup_beacon_block( block_root, - RpcBlock::new( - self.next_block.clone(), - None, - &self._harness.chain.data_availability_checker, - self._harness.spec.clone(), - ) - .unwrap(), + LookupBlock::new(self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) @@ -1332,7 +1320,7 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod } } BlockImportMethod::Rpc => { - rig.enqueue_rpc_block(); + rig.enqueue_lookup_block(); events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); @@ -1418,7 +1406,7 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod } } BlockImportMethod::Rpc => { - rig.enqueue_rpc_block(); + rig.enqueue_lookup_block(); events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); @@ -1612,7 +1600,7 @@ async fn test_rpc_block_reprocessing() { let next_block_root = rig.next_block.canonical_root(); // Insert the next block into the duplicate cache manually let handle = rig.duplicate_cache.check_and_insert(next_block_root); - rig.enqueue_single_lookup_rpc_block(); + rig.enqueue_single_lookup_block(); rig.assert_event_journal_completes(&[WorkType::RpcBlock]) .await; @@ -2352,3 +2340,8 @@ async fn test_payload_envelopes_by_range_no_duplicates_with_skip_slots() { unique_roots.len(), 
); } + +// TODO(ePBS): Add integration tests for envelope deferral (UnknownBlockForEnvelope): +// 1. Gossip envelope arrives before its block → queued via UnknownBlockForEnvelope +// 2. Block imported → envelope released and processed successfully +// 3. Timeout path → envelope released and re-verified diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index be491e56d3..008e7ab9ac 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -198,13 +198,6 @@ impl SubnetService { self.permanent_attestation_subscriptions.iter() } - /// Returns whether we are subscribed to a subnet for testing purposes. - #[cfg(test)] - pub(crate) fn is_subscribed(&self, subnet: &Subnet) -> bool { - self.subscriptions.contains_key(subnet) - || self.permanent_attestation_subscriptions.contains(subnet) - } - /// Returns whether we are subscribed to a permanent subnet for testing purposes. #[cfg(test)] pub(crate) fn is_subscribed_permanent(&self, subnet: &Subnet) -> bool { diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index bee6569b7b..619154d738 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -335,28 +335,26 @@ mod test { // submit the subscriptions subnet_service.validator_subscriptions(vec![sub1, sub2].into_iter()); - // Unsubscription event should happen at slot 2 (since subnet id's are the same, unsubscription event should be at higher slot + 1) - let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); + let subnet = Subnet::Attestation(subnet_id1); - if subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { - // If we are permanently subscribed to this subnet, we won't see a subscribe message - let _ = get_events_until_num_slots(&mut subnet_service, None, 1).await; + if 
subnet_service.is_subscribed_permanent(&subnet) { + // If permanently subscribed, no Subscribe/Unsubscribe events will be generated + let events = get_events_until_num_slots(&mut subnet_service, None, 3).await; + assert!(events.is_empty()); } else { - let subscription = get_events_until_num_slots(&mut subnet_service, None, 1).await; - assert_eq!(subscription, [expected]); + // Wait 1 slot: expect a single Subscribe event (no duplicate for the same subnet). + let events = get_events_until_num_slots(&mut subnet_service, None, 1).await; + assert_eq!(events, [SubnetServiceMessage::Subscribe(subnet)]); + + // Wait for the Unsubscribe event after subscription_slot2 expires. + // Use a longer timeout because the test doesn't start exactly at a slot + // boundary, so the previous 1-slot wait may end partway through slot 1, + // leaving insufficient time to catch the Unsubscribe within another 1 slot. + let events = get_events_until_num_slots(&mut subnet_service, Some(1), 3).await; + assert_eq!(events, [SubnetServiceMessage::Unsubscribe(subnet)]); } - // Get event for 1 more slot duration, we should get the unsubscribe event now. - let unsubscribe_event = get_events_until_num_slots(&mut subnet_service, None, 1).await; - - // If the long lived and short lived subnets are different, we should get an unsubscription - // event. - let expected = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); - if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { - assert_eq!([expected], unsubscribe_event[..]); - } - - // Should no longer be subscribed to any short lived subnets after unsubscription. + // Should no longer be subscribed to any short lived subnets after unsubscription. 
assert_eq!(subnet_service.subscriptions().count(), 0); } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 801c9eca4d..0f80138d24 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -19,7 +19,7 @@ use crate::sync::manager::BatchProcessResult; use crate::sync::network_context::{ RangeRequestId, RpcRequestSendError, RpcResponseError, SyncNetworkContext, }; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::service::api_types::Id; use lighthouse_network::types::{BackFillState, NetworkGlobals}; @@ -55,7 +55,7 @@ const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 10; /// after `MAX_BATCH_PROCESSING_ATTEMPTS` times, it is considered faulty. const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 10; -type RpcBlocks = Vec>; +type RpcBlocks = Vec>; type BackFillBatchInfo = BatchInfo, RpcBlocks>; @@ -390,7 +390,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - blocks: Vec>, + blocks: Vec>, ) -> Result { // check if we have this batch let Some(batch) = self.batches.get_mut(&batch_id) else { diff --git a/beacon_node/network/src/sync/batch.rs b/beacon_node/network/src/sync/batch.rs index e87ffd119e..10af1bf503 100644 --- a/beacon_node/network/src/sync/batch.rs +++ b/beacon_node/network/src/sync/batch.rs @@ -1,4 +1,4 @@ -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use educe::Educe; use lighthouse_network::PeerId; use lighthouse_network::rpc::methods::BlocksByRangeRequest; @@ -449,7 +449,7 @@ impl BatchInfo { } // BatchInfo implementations for RangeSync -impl BatchInfo>> { +impl BatchInfo>> { /// Returns a BlocksByRange request associated with the batch. 
pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) { ( diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index a287771854..98cf3e0a1f 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,6 +1,6 @@ use beacon_chain::{ BeaconChainTypes, - block_verification_types::{AvailableBlockData, RpcBlock}, + block_verification_types::{AvailableBlockData, RangeSyncBlock}, data_availability_checker::DataAvailabilityChecker, data_column_verification::CustodyDataColumn, get_block_root, @@ -200,7 +200,7 @@ impl RangeBlockComponentsRequest { &mut self, da_checker: Arc>, spec: Arc, - ) -> Option>, CouplingError>> + ) -> Option>, CouplingError>> where T: BeaconChainTypes, { @@ -288,7 +288,7 @@ impl RangeBlockComponentsRequest { blobs: Vec>>, da_checker: Arc>, spec: Arc, - ) -> Result>, CouplingError> + ) -> Result>, CouplingError> where T: BeaconChainTypes, { @@ -335,7 +335,7 @@ impl RangeBlockComponentsRequest { })?; let block_data = AvailableBlockData::new_with_blobs(blobs); responses.push( - RpcBlock::new(block, Some(block_data), &da_checker, spec.clone()) + RangeSyncBlock::new(block, block_data, &da_checker, spec.clone()) .map_err(|e| CouplingError::BlobPeerFailure(format!("{e:?}")))?, ) } @@ -360,7 +360,7 @@ impl RangeBlockComponentsRequest { attempt: usize, da_checker: Arc>, spec: Arc, - ) -> Result>, CouplingError> + ) -> Result>, CouplingError> where T: BeaconChainTypes, { @@ -388,12 +388,12 @@ impl RangeBlockComponentsRequest { // Now iterate all blocks ensuring that the block roots of each block and data column match, // plus we have columns for our custody requirements - let mut rpc_blocks = Vec::with_capacity(blocks.len()); + let mut range_sync_blocks = Vec::with_capacity(blocks.len()); let exceeded_retries = attempt >= MAX_COLUMN_RETRIES; for block in blocks { let block_root = 
get_block_root(&block); - rpc_blocks.push(if block.num_expected_blobs() > 0 { + range_sync_blocks.push(if block.num_expected_blobs() > 0 { let Some(mut data_columns_by_index) = data_columns_by_block.remove(&block_root) else { let responsible_peers = column_to_peer.iter().map(|c| (*c.0, *c.1)).collect(); @@ -441,11 +441,11 @@ impl RangeBlockComponentsRequest { let block_data = AvailableBlockData::new_with_data_columns(custody_columns.iter().map(|c| c.as_data_column().clone()).collect::>()); - RpcBlock::new(block, Some(block_data), &da_checker, spec.clone()) + RangeSyncBlock::new(block, block_data, &da_checker, spec.clone()) .map_err(|e| CouplingError::InternalError(format!("{:?}", e)))? } else { // Block has no data, expects zero columns - RpcBlock::new(block, Some(AvailableBlockData::NoData), &da_checker, spec.clone()) + RangeSyncBlock::new(block, AvailableBlockData::NoData, &da_checker, spec.clone()) .map_err(|e| CouplingError::InternalError(format!("{:?}", e)))? }); } @@ -458,7 +458,7 @@ impl RangeBlockComponentsRequest { debug!(?remaining_roots, "Not all columns consumed for block"); } - Ok(rpc_blocks) + Ok(range_sync_blocks) } } @@ -947,7 +947,7 @@ mod tests { } let result: Result< - Vec>, + Vec>, crate::sync::block_sidecar_coupling::CouplingError, > = info.responses(da_checker.clone(), spec.clone()).unwrap(); assert!(result.is_err()); @@ -981,10 +981,10 @@ mod tests { // WHEN: Attempting to get responses again let result = info.responses(da_checker, spec).unwrap(); - // THEN: Should succeed with complete RPC blocks + // THEN: Should succeed with complete RangeSync blocks assert!(result.is_ok()); - let rpc_blocks = result.unwrap(); - assert_eq!(rpc_blocks.len(), 2); + let range_sync_blocks = result.unwrap(); + assert_eq!(range_sync_blocks.len(), 2); } #[test] diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index a909896ccb..e9d289b777 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ 
b/beacon_node/network/src/sync/network_context.rs @@ -17,7 +17,8 @@ use crate::sync::block_lookups::SingleLookupId; use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use crate::sync::range_data_column_batch_request::RangeDataColumnBatchRequest; -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::LookupBlock; +use beacon_chain::block_verification_types::{AsBlock, RangeSyncBlock}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; @@ -748,7 +749,7 @@ impl SyncNetworkContext { &mut self, id: ComponentsByRangeRequestId, range_block_component: RangeBlockComponent, - ) -> Option>, RpcResponseError>> { + ) -> Option>, RpcResponseError>> { let Entry::Occupied(mut entry) = self.components_by_range_requests.entry(id) else { metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]); return None; @@ -1673,21 +1674,15 @@ impl SyncNetworkContext { .beacon_processor_if_enabled() .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; - let block = RpcBlock::new( - block, - None, - &self.chain.data_availability_checker, - self.chain.spec.clone(), - ) - .map_err(|_| SendErrorProcessor::SendError)?; + let lookup_block = LookupBlock::new(block); - debug!(block = ?block_root, block_slot = %block.slot(), id, "Sending block for processing"); + debug!(block = ?block_root, block_slot = %lookup_block.slot(), id, "Sending block for processing"); // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync // must receive a single `SyncMessage::BlockComponentProcessed` with this process type beacon_processor - .send_rpc_beacon_block( + .send_lookup_beacon_block( block_root, - block, + lookup_block, seen_timestamp, BlockProcessType::SingleBlock { id }, ) diff --git a/beacon_node/network/src/sync/range_sync/chain.rs 
b/beacon_node/network/src/sync/range_sync/chain.rs index e3ff638121..d533d8ed0d 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -10,7 +10,7 @@ use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::{RangeRequestId, RpcRequestSendError, RpcResponseError}; use crate::sync::{BatchProcessResult, network_context::SyncNetworkContext}; use beacon_chain::BeaconChainTypes; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; use logging::crit; @@ -40,7 +40,7 @@ const BATCH_BUFFER_SIZE: u8 = 5; /// and continued is now in an inconsistent state. pub type ProcessingResult = Result; -type RpcBlocks = Vec>; +type RpcBlocks = Vec>; type RangeSyncBatchInfo = BatchInfo, RpcBlocks>; type RangeSyncBatches = BTreeMap>; @@ -273,7 +273,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - blocks: Vec>, + blocks: Vec>, ) -> ProcessingResult { let _guard = self.span.clone().entered(); // check if we have this batch diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 9fd72ac98a..6509ac3cb3 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -47,7 +47,7 @@ use crate::status::ToStatusMessage; use crate::sync::BatchProcessResult; use crate::sync::batch::BatchId; use crate::sync::network_context::{RpcResponseError, SyncNetworkContext}; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::service::api_types::Id; @@ -213,7 +213,7 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - 
blocks: Vec>, + blocks: Vec>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 769a11d976..cd872df887 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -7,6 +7,7 @@ use crate::sync::{ manager::{BlockProcessType, BlockProcessingResult, SyncManager}, }; use beacon_chain::blob_verification::KzgVerifiedBlob; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ AvailabilityProcessingStatus, BlockError, NotifyExecutionLayer, @@ -464,7 +465,7 @@ impl TestRig { panic!("Test consumer requested unknown block: {id:?}") }) .block_data() - .and_then(|d| d.blobs()) + .blobs() .unwrap_or_else(|| panic!("Block {id:?} has no blobs")) .iter() .find(|blob| blob.index == id.index) @@ -528,7 +529,7 @@ impl TestRig { panic!("Test consumer requested unknown block: {id:?}") }) .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .unwrap_or_else(|| panic!("Block id {id:?} has no columns")); id.columns .iter() @@ -594,7 +595,7 @@ impl TestRig { // - Some blocks may not have blobs as the blob count is random let blobs = (req.start_slot..req.start_slot + req.count) .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) - .filter_map(|block| block.block_data().and_then(|d| d.blobs())) + .filter_map(|block| block.block_data().blobs()) .flat_map(|blobs| blobs.into_iter()) .collect::>(); self.send_rpc_blobs_response(req_id, peer_id, &blobs); @@ -610,7 +611,7 @@ impl TestRig { // - Some blocks may not have columns as the blob count is random let columns = (req.start_slot..req.start_slot + req.count) .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) - .filter_map(|block| block.block_data().and_then(|d| d.data_columns())) + .filter_map(|block| 
block.block_data().data_columns()) .flat_map(|columns| { columns .into_iter() @@ -786,10 +787,10 @@ impl TestRig { } fn corrupt_last_block_signature(&mut self) { - let rpc_block = self.get_last_block().clone(); - let mut block = (*rpc_block.block_cloned()).clone(); - let blobs = rpc_block.block_data().and_then(|d| d.blobs()); - let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let range_sync_block = self.get_last_block().clone(); + let mut block = (*range_sync_block.block_cloned()).clone(); + let blobs = range_sync_block.block_data().blobs(); + let columns = range_sync_block.block_data().data_columns(); *block.signature_mut() = self.valid_signature(); self.re_insert_block(Arc::new(block), blobs, columns); } @@ -801,15 +802,15 @@ impl TestRig { } fn corrupt_last_blob_proposer_signature(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let mut blobs = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let mut blobs = range_sync_block .block_data() - .and_then(|d| d.blobs()) + .blobs() .expect("no blobs") .into_iter() .collect::>(); - let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let columns = range_sync_block.block_data().data_columns(); let first = blobs.first_mut().expect("empty blobs"); Arc::make_mut(first).signed_block_header.signature = self.valid_signature(); let max_blobs = @@ -822,15 +823,15 @@ impl TestRig { } fn corrupt_last_blob_kzg_proof(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let mut blobs = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let mut blobs = range_sync_block .block_data() - .and_then(|d| d.blobs()) + .blobs() .expect("no blobs") .into_iter() .collect::>(); - let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let columns = 
range_sync_block.block_data().data_columns(); let first = blobs.first_mut().expect("empty blobs"); Arc::make_mut(first).kzg_proof = kzg::KzgProof::empty(); let max_blobs = @@ -843,12 +844,12 @@ impl TestRig { } fn corrupt_last_column_proposer_signature(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let blobs = rpc_block.block_data().and_then(|d| d.blobs()); - let mut columns = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let blobs = range_sync_block.block_data().blobs(); + let mut columns = range_sync_block .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .expect("no columns"); let first = columns.first_mut().expect("empty columns"); Arc::make_mut(first) @@ -859,12 +860,12 @@ impl TestRig { } fn corrupt_last_column_kzg_proof(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let blobs = rpc_block.block_data().and_then(|d| d.blobs()); - let mut columns = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let blobs = range_sync_block.block_data().blobs(); + let mut columns = range_sync_block .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .expect("no columns"); let first = columns.first_mut().expect("empty columns"); let column = Arc::make_mut(first); @@ -873,7 +874,7 @@ impl TestRig { self.re_insert_block(block, blobs, Some(columns)); } - fn get_last_block(&self) -> &RpcBlock { + fn get_last_block(&self) -> &RangeSyncBlock { let (_, last_block) = self .network_blocks_by_root .iter() @@ -893,13 +894,13 @@ impl TestRig { let block_root = block.canonical_root(); let block_slot = block.slot(); let block_data = if let Some(columns) = columns { - Some(AvailableBlockData::new_with_data_columns(columns)) + AvailableBlockData::new_with_data_columns(columns) } else if let Some(blobs) = blobs { - 
Some(AvailableBlockData::new_with_blobs(blobs)) + AvailableBlockData::new_with_blobs(blobs) } else { - Some(AvailableBlockData::NoData) + AvailableBlockData::NoData }; - let rpc_block = RpcBlock::new( + let range_sync_block = RangeSyncBlock::new( block, block_data, &self.harness.chain.data_availability_checker, @@ -907,8 +908,9 @@ impl TestRig { ) .unwrap(); self.network_blocks_by_slot - .insert(block_slot, rpc_block.clone()); - self.network_blocks_by_root.insert(block_root, rpc_block); + .insert(block_slot, range_sync_block.clone()); + self.network_blocks_by_root + .insert(block_root, range_sync_block); } /// Trigger a lookup with the last created block @@ -947,7 +949,7 @@ impl TestRig { /// Import a block directly into the chain without going through lookup sync async fn import_block_by_root(&mut self, block_root: Hash256) { - let rpc_block = self + let range_sync_block = self .network_blocks_by_root .get(&block_root) .unwrap_or_else(|| panic!("No block for root {block_root}")) @@ -957,9 +959,9 @@ impl TestRig { .chain .process_block( block_root, - rpc_block, + range_sync_block, NotifyExecutionLayer::Yes, - BlockImportSource::Gossip, + BlockImportSource::RangeSync, || Ok(()), ) .await @@ -979,7 +981,7 @@ impl TestRig { let blobs = self .get_last_block() .block_data() - .and_then(|d| d.blobs()) + .blobs() .expect("no blobs"); let blob = blobs.first().expect("empty blobs"); self.trigger_unknown_parent_blob(peer_id, blob.clone()); @@ -990,7 +992,7 @@ impl TestRig { let columns = self .get_last_block() .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .expect("No data columns"); let column = columns.first().expect("empty columns"); self.trigger_unknown_parent_column(peer_id, column.clone()); @@ -1475,15 +1477,14 @@ impl TestRig { ) -> AvailabilityProcessingStatus { // Simulate importing block from another source. Don't use GossipVerified as it checks with // the clock, which does not match the timestamp in the payload. 
- let block_root = block.canonical_root(); - let rpc_block = RpcBlock::BlockOnly { block_root, block }; + let lookup_block = LookupBlock::new(block); self.harness .chain .process_block( - block_root, - rpc_block, + lookup_block.block_root(), + lookup_block, NotifyExecutionLayer::Yes, - BlockImportSource::Gossip, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -2196,10 +2197,7 @@ async fn blobs_in_da_checker_skip_download() { }; r.build_chain(1).await; let block = r.get_last_block().clone(); - let blobs = block - .block_data() - .and_then(|d| d.blobs()) - .expect("block with no blobs"); + let blobs = block.block_data().blobs().expect("block with no blobs"); for blob in &blobs { r.insert_blob_to_da_checker(blob.clone()); } diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index f00cf5841d..6e948e4726 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -3,7 +3,7 @@ use crate::sync::SyncMessage; use crate::sync::block_lookups::BlockLookupsMetrics; use crate::sync::manager::SyncManager; use crate::sync::tests::lookups::SimulateConfig; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::builder::Witness; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; @@ -77,8 +77,8 @@ struct TestRig { rng: ChaCha20Rng, fork_name: ForkName, /// Blocks that will be used in the test but may not be known to `harness` yet. 
- network_blocks_by_root: HashMap>, - network_blocks_by_slot: HashMap>, + network_blocks_by_root: HashMap>, + network_blocks_by_slot: HashMap>, penalties: Vec, /// All seen lookups through the test run seen_lookups: HashMap, diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 67395ccd25..c19ee8eb6d 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -10,7 +10,7 @@ use beacon_chain::block_verification_types::AvailableBlockData; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; -use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RpcBlock}; +use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RangeSyncBlock}; use beacon_processor::WorkType; use lighthouse_network::rpc::RequestType; use lighthouse_network::rpc::methods::{ @@ -430,7 +430,7 @@ impl TestRig { .chain .process_block( block_root, - build_rpc_block(block.into(), &data_sidecars, self.harness.chain.clone()), + build_range_sync_block(block.into(), &data_sidecars, self.harness.chain.clone()), NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), @@ -443,17 +443,17 @@ impl TestRig { } } -fn build_rpc_block( +fn build_range_sync_block( block: Arc>, data_sidecars: &Option>, chain: Arc>, -) -> RpcBlock { +) -> RangeSyncBlock { match data_sidecars { Some(DataSidecars::Blobs(blobs)) => { let block_data = AvailableBlockData::new_with_blobs(blobs.clone()); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, chain.spec.clone(), ) @@ -466,18 +466,18 @@ fn build_rpc_block( .map(|c| c.as_data_column().clone()) .collect::>(), ); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, 
chain.spec.clone(), ) .unwrap() } // Block has no data, expects zero columns - None => RpcBlock::new( + None => RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &chain.data_availability_checker, chain.spec.clone(), ) diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 6b8c615631..6e01648263 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -186,10 +186,8 @@ impl LevelDB { ) }; - for (start_key, end_key) in [ - endpoints(DBColumn::BeaconState), - endpoints(DBColumn::BeaconStateSummary), - ] { + { + let (start_key, end_key) = endpoints(DBColumn::BeaconStateHotSummary); self.db.compact(&start_key, &end_key); } diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index e678a344c2..aa718e0665 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -12,9 +12,7 @@ use std::str::FromStr; use std::sync::LazyLock; use superstruct::superstruct; use types::state::HistoricalSummary; -use types::{ - BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, Validator, execution::StatePayloadStatus, -}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, Validator}; static EMPTY_PUBKEY: LazyLock = LazyLock::new(PublicKeyBytes::empty); @@ -655,19 +653,11 @@ impl HierarchyModuli { /// exponents [5,13,21], to reconstruct state at slot 3,000,003: if start = 3,000,002 /// layer 2 diff will point to the start snapshot instead of the layer 1 diff at /// 2998272. - /// * `payload_status` - whether the state is `Full` (came from processing a payload), or - /// `Pending` (came from processing a block). Prior to Gloas all states are `Pending`. - /// Skipped slots post-Gloas should also use a `Pending` status. 
- pub fn storage_strategy( - &self, - slot: Slot, - start_slot: Slot, - _payload_status: StatePayloadStatus, - ) -> Result { - // FIXME(sproul): Reverted the idea of using different storage strategies for full and - // pending states, this has the consequence of storing double diffs and double snapshots - // at full slots. The complexity of managing skipped slots was the main impetus for - // reverting the payload-status sensitive design: a Full skipped slot has no same-slot + pub fn storage_strategy(&self, slot: Slot, start_slot: Slot) -> Result { + // Initially had the idea of using different storage strategies for full and pending states, + // but it was very complex. However without this concept we end up storing two diffs/two + // snapshots at full slots. The complexity of managing skipped slots was the main impetus + // for reverting the payload-status sensitive design: a Full skipped slot has no same-slot // Pending state to replay from, so has to be handled differently from Full non-skipped // slots. match slot.cmp(&start_slot) { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c1f93ce07c..78dd69e55a 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -452,26 +452,15 @@ impl HotColdDB, BeaconNodeBackend> { } impl, Cold: ItemStore> HotColdDB { - fn cold_storage_strategy( - &self, - slot: Slot, - // payload_status: StatePayloadStatus, - ) -> Result { + fn cold_storage_strategy(&self, slot: Slot) -> Result { // The start slot for the freezer HDiff is always 0 - // TODO(gloas): wire up payload_status - Ok(self - .hierarchy - .storage_strategy(slot, Slot::new(0), StatePayloadStatus::Pending)?) + Ok(self.hierarchy.storage_strategy(slot, Slot::new(0))?) 
} - pub fn hot_storage_strategy( - &self, - slot: Slot, - payload_status: StatePayloadStatus, - ) -> Result { + pub fn hot_storage_strategy(&self, slot: Slot) -> Result { Ok(self .hierarchy - .storage_strategy(slot, self.hot_hdiff_start_slot()?, payload_status)?) + .storage_strategy(slot, self.hot_hdiff_start_slot()?)?) } pub fn hot_hdiff_start_slot(&self) -> Result { @@ -1402,8 +1391,7 @@ impl, Cold: ItemStore> HotColdDB // Use `Pending` status here because snapshots and diffs are only stored for // `Pending` states. if let Some(slot) = slot - && let Ok(strategy) = - self.hot_storage_strategy(slot, StatePayloadStatus::Pending) + && let Ok(strategy) = self.hot_storage_strategy(slot) { match strategy { StorageStrategy::Snapshot => { @@ -1675,8 +1663,6 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { - let payload_status = state.payload_status(); - match self.state_cache.lock().put_state( *state_root, state.get_latest_block_root(*state_root), @@ -1722,7 +1708,7 @@ impl, Cold: ItemStore> HotColdDB debug!( ?state_root, slot = %state.slot(), - storage_strategy = ?self.hot_storage_strategy(state.slot(), payload_status)?, + storage_strategy = ?self.hot_storage_strategy(state.slot())?, diff_base_state = %summary.diff_base_state, previous_state_root = ?summary.previous_state_root, "Storing hot state summary and diffs" @@ -1745,7 +1731,7 @@ impl, Cold: ItemStore> HotColdDB self, *state_root, state, - self.hot_storage_strategy(state.slot(), state.payload_status())?, + self.hot_storage_strategy(state.slot())?, )?; ops.push(hot_state_summary.as_kv_store_op(*state_root)); Ok(hot_state_summary) @@ -1758,7 +1744,7 @@ impl, Cold: ItemStore> HotColdDB ops: &mut Vec, ) -> Result<(), Error> { let slot = state.slot(); - let storage_strategy = self.hot_storage_strategy(slot, state.payload_status())?; + let storage_strategy = self.hot_storage_strategy(slot)?; match storage_strategy { StorageStrategy::ReplayFrom(_) => { // Already have 
persisted the state summary, don't persist anything else @@ -1896,7 +1882,7 @@ impl, Cold: ItemStore> HotColdDB // Load the hot state summary for the previous state. // - // If it has the same slot as this summary then we know this summary is for a `Full` block + // If it has the same slot as this summary then we know this summary is for a `Full` state // (payload state), because they are always diffed against their same-slot `Pending` state. // // If the previous summary has a different slot AND the latest block is from `summary.slot`, @@ -1920,6 +1906,51 @@ impl, Cold: ItemStore> HotColdDB } } + /// Recompute the payload status for a state at `slot` that is stored in the cold DB. + /// + /// This function returns an error for any `slot` that is outside the range of slots stored in + /// the freezer DB. + /// + /// For all slots prior to Gloas, it returns `Pending`. + /// + /// For post-Gloas slots the algorithm is: + /// + /// 1. Load the most recently applied block at `slot` (may not be from `slot` in case of a skip) + /// 2. Load the canonical `state_root` at the slot of the block. If this `state_root` matches + /// the one in the block then we know the state at *that* slot is canonically empty (no + /// payload). Conversely, if it is different, we know that the block's slot is full (assuming + /// no database corruption). + /// 3. The payload status of `slot` is the same as the payload status of `block.slot()`, because + /// we only care about whether a beacon block or payload was applied most recently, and + /// `block` is by definition the most-recently-applied block. + /// + /// All of this mucking around could be avoided if we do a schema migration to record the + /// payload status in the database. For now, this is simpler. + fn get_cold_state_payload_status(&self, slot: Slot) -> Result { + // Pre-Gloas states are always `Pending`. 
+ if !self.spec.fork_name_at_slot::(slot).gloas_enabled() { + return Ok(StatePayloadStatus::Pending); + } + + let block_root = self + .get_cold_block_root(slot)? + .ok_or(HotColdDBError::MissingFrozenBlock(slot))?; + + let block = self + .get_blinded_block(&block_root)? + .ok_or(Error::MissingBlock(block_root))?; + + let state_root = self + .get_cold_state_root(block.slot())? + .ok_or(HotColdDBError::MissingRestorePointState(block.slot()))?; + + if block.state_root() != state_root { + Ok(StatePayloadStatus::Full) + } else { + Ok(StatePayloadStatus::Pending) + } + } + fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result { if let Some(buffer) = self .state_cache @@ -1929,20 +1960,16 @@ impl, Cold: ItemStore> HotColdDB return Ok(buffer); } - let Some( - summary @ HotStateSummary { - slot, - diff_base_state, - .. - }, - ) = self.load_hot_state_summary(&state_root)? + let Some(HotStateSummary { + slot, + diff_base_state, + .. + }) = self.load_hot_state_summary(&state_root)? else { return Err(Error::MissingHotStateSummary(state_root)); }; - let payload_status = self.get_hot_state_summary_payload_status(&summary)?; - - let buffer = match self.hot_storage_strategy(slot, payload_status)? { + let buffer = match self.hot_storage_strategy(slot)? { StorageStrategy::Snapshot => { let Some(state) = self.load_hot_state_as_snapshot(state_root)? else { let existing_snapshots = self.load_hot_state_snapshot_roots()?; @@ -2035,7 +2062,7 @@ impl, Cold: ItemStore> HotColdDB ?payload_status, "Loading hot state" ); - let mut state = match self.hot_storage_strategy(slot, payload_status)? { + let mut state = match self.hot_storage_strategy(slot)? 
{ strat @ StorageStrategy::Snapshot | strat @ StorageStrategy::DiffFrom(_) => { let buffer_timer = metrics::start_timer_vec( &metrics::BEACON_HDIFF_BUFFER_LOAD_TIME, @@ -2472,8 +2499,7 @@ impl, Cold: ItemStore> HotColdDB self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { Err(Error::StateShouldNotBeRequired(slot)) })?; - // TODO(gloas): calculate correct payload status for cold states - let payload_status = StatePayloadStatus::Pending; + let payload_status = self.get_cold_state_payload_status(slot)?; let state = self.replay_blocks( base_state, blocks, @@ -2609,9 +2635,10 @@ impl, Cold: ItemStore> HotColdDB { return Ok((blocks, vec![])); } - // TODO(gloas): wire this up - let end_block_root = Hash256::ZERO; - let desired_payload_status = StatePayloadStatus::Pending; + let end_block_root = self + .get_cold_block_root(end_slot)? + .ok_or(HotColdDBError::MissingFrozenBlock(end_slot))?; + let desired_payload_status = self.get_cold_state_payload_status(end_slot)?; let envelopes = self.load_payload_envelopes_for_blocks( &blocks, end_block_root, @@ -2630,7 +2657,6 @@ impl, Cold: ItemStore> HotColdDB /// Payloads are also returned in slot-ascending order, but only payloads forming part of /// the chain are loaded (payloads for EMPTY slots are omitted). Prior to Gloas, an empty /// vec of payloads will be returned. - // TODO(gloas): handle last payload #[allow(clippy::type_complexity)] pub fn load_blocks_to_replay( &self, @@ -2716,7 +2742,6 @@ impl, Cold: ItemStore> HotColdDB } // Load the payload for the last block if desired. 
- // TODO(gloas): check that we don't load a duplicate in the case of a skipped slot if let StatePayloadStatus::Full = desired_payload_status { let envelope = self.get_payload_envelope(&end_block_root)?.ok_or( HotColdDBError::MissingExecutionPayloadEnvelope(end_block_root), @@ -3245,12 +3270,10 @@ impl, Cold: ItemStore> HotColdDB Some(mut split) => { debug!(?split, "Loaded split partial"); // Load the hot state summary to get the block root. - let latest_block_root = self - .load_block_root_from_summary_any_version(&split.state_root) - .ok_or(HotColdDBError::MissingSplitState( - split.state_root, - split.slot, - ))?; + let latest_block_root = + self.load_block_root_from_summary(&split.state_root).ok_or( + HotColdDBError::MissingSplitState(split.state_root, split.slot), + )?; split.block_root = latest_block_root; Ok(Some(split)) } @@ -3281,29 +3304,11 @@ impl, Cold: ItemStore> HotColdDB .map_err(|e| Error::LoadHotStateSummary(*state_root, e.into())) } - /// Load a hot state's summary in V22 format, given its root. - pub fn load_hot_state_summary_v22( - &self, - state_root: &Hash256, - ) -> Result, Error> { - self.hot_db - .get(state_root) - .map_err(|e| Error::LoadHotStateSummary(*state_root, e.into())) - } - - /// Load the latest block root for a hot state summary either in modern form, or V22 form. - /// - /// This function is required to open a V22 database for migration to V24, or vice versa. - pub fn load_block_root_from_summary_any_version( - &self, - state_root: &Hash256, - ) -> Option { + /// Load the latest block root for a hot state summary. 
+ pub fn load_block_root_from_summary(&self, state_root: &Hash256) -> Option { if let Ok(Some(summary)) = self.load_hot_state_summary(state_root) { return Some(summary.latest_block_root); } - if let Ok(Some(summary)) = self.load_hot_state_summary_v22(state_root) { - return Some(summary.latest_block_root); - } None } @@ -4262,30 +4267,6 @@ impl HotStateSummary { } } -/// Legacy hot state summary used in schema V22 and before. -/// -/// This can be deleted when we remove V22 support. -#[derive(Debug, Clone, Copy, Encode, Decode)] -pub struct HotStateSummaryV22 { - pub slot: Slot, - pub latest_block_root: Hash256, - pub epoch_boundary_state_root: Hash256, -} - -impl StoreItem for HotStateSummaryV22 { - fn db_column() -> DBColumn { - DBColumn::BeaconStateSummary - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) - } -} - /// Struct for summarising a state in the freezer database. #[derive(Debug, Clone, Copy, Default, Encode, Decode)] pub(crate) struct ColdStateSummary { diff --git a/beacon_node/store/src/invariants.rs b/beacon_node/store/src/invariants.rs new file mode 100644 index 0000000000..d251fb8800 --- /dev/null +++ b/beacon_node/store/src/invariants.rs @@ -0,0 +1,796 @@ +//! Database invariant checks for the hot and cold databases. +//! +//! These checks verify the consistency of data stored in the database. They are designed to be +//! called from the HTTP API and from tests to detect data corruption or bugs in the store logic. +//! +//! See the `check_invariants` and `check_database_invariants` methods for the full list. + +use crate::hdiff::StorageStrategy; +use crate::hot_cold_store::{ColdStateSummary, HotStateSummary}; +use crate::{DBColumn, Error, ItemStore}; +use crate::{HotColdDB, Split}; +use serde::Serialize; +use ssz::Decode; +use std::cmp; +use std::collections::HashSet; +use types::*; + +/// Result of running invariant checks on the database. 
+#[derive(Debug, Clone, Serialize)] +pub struct InvariantCheckResult { + /// List of invariant violations found. + pub violations: Vec, +} + +impl InvariantCheckResult { + pub fn new() -> Self { + Self { + violations: Vec::new(), + } + } + + pub fn is_ok(&self) -> bool { + self.violations.is_empty() + } + + pub fn add_violation(&mut self, violation: InvariantViolation) { + self.violations.push(violation); + } + + pub fn merge(&mut self, other: InvariantCheckResult) { + self.violations.extend(other.violations); + } +} + +impl Default for InvariantCheckResult { + fn default() -> Self { + Self::new() + } +} + +/// Context data from the beacon chain needed for invariant checks. +/// +/// This allows all invariant checks to live in the store crate while still checking +/// invariants that depend on fork choice, state cache, and custody context. +pub struct InvariantContext { + /// Block roots tracked by fork choice (invariant 1). + pub fork_choice_blocks: Vec<(Hash256, Slot)>, + /// State roots held in the in-memory state cache (invariant 8). + pub state_cache_roots: Vec, + /// Custody columns for the current epoch (invariant 7). + pub custody_columns: Vec, + /// Compressed pubkey bytes from the in-memory validator pubkey cache, indexed by validator index + /// (invariant 9). + pub pubkey_cache_pubkeys: Vec>, +} + +/// A single invariant violation. +#[derive(Debug, Clone, Serialize)] +pub enum InvariantViolation { + /// Invariant 1: fork choice block consistency. + /// + /// ```text + /// block in fork_choice && descends_from_finalized -> block in hot_db + /// ``` + ForkChoiceBlockMissing { block_root: Hash256, slot: Slot }, + /// Invariant 2: block and state consistency. + /// + /// ```text + /// block in hot_db && block.slot >= split.slot + /// -> state_summary for block.state_root() in hot_db + /// ``` + HotBlockMissingStateSummary { + block_root: Hash256, + slot: Slot, + state_root: Hash256, + }, + /// Invariant 3: state summary diff consistency. 
+ /// + /// ```text + /// state_summary in hot_db + /// -> state diff/snapshot/nothing in hot_db according to hierarchy rules + /// ``` + HotStateMissingSnapshot { state_root: Hash256, slot: Slot }, + /// Invariant 3: state summary diff consistency (missing diff). + /// + /// ```text + /// state_summary in hot_db + /// -> state diff/snapshot/nothing in hot_db according to hierarchy rules + /// ``` + HotStateMissingDiff { state_root: Hash256, slot: Slot }, + /// Invariant 3: DiffFrom/ReplayFrom base slot must reference an existing summary. + /// + /// ```text + /// state_summary in hot_db + /// -> state diff/snapshot/nothing in hot_db according to hierarchy rules + /// ``` + HotStateBaseSummaryMissing { + slot: Slot, + base_state_root: Hash256, + }, + /// Invariant 4: state summary chain consistency. + /// + /// ```text + /// state_summary in hot_db && state_summary.slot > split.slot + /// -> state_summary for previous_state_root in hot_db + /// ``` + HotStateMissingPreviousSummary { + slot: Slot, + previous_state_root: Hash256, + }, + /// Invariant 5: block and execution payload consistency. + /// + /// ```text + /// block in hot_db && !prune_payloads -> payload for block.root in hot_db + /// ``` + ExecutionPayloadMissing { block_root: Hash256, slot: Slot }, + /// Invariant 6: block and blobs consistency. + /// + /// ```text + /// block in hot_db && num_blob_commitments > 0 + /// -> blob_list for block.root in hot_db + /// ``` + BlobSidecarMissing { block_root: Hash256, slot: Slot }, + /// Invariant 7: block and data columns consistency. + /// + /// ```text + /// block in hot_db && num_blob_commitments > 0 + /// && block.slot >= earliest_available_slot + /// && data_column_idx in custody_columns + /// -> (block_root, data_column_idx) in hot_db + /// ``` + DataColumnMissing { + block_root: Hash256, + slot: Slot, + column_index: ColumnIndex, + }, + /// Invariant 8: state cache and disk consistency. 
+ /// + /// ```text + /// state in state_cache -> state_summary in hot_db + /// ``` + StateCacheMissingSummary { state_root: Hash256 }, + /// Invariant 9: pubkey cache consistency. + /// + /// ```text + /// state_summary in hot_db + /// -> all validator pubkeys from state.validators are in the hot_db + /// ``` + PubkeyCacheMissing { validator_index: usize }, + /// Invariant 9b: pubkey cache value mismatch. + /// + /// ```text + /// pubkey_cache[i] == hot_db(PubkeyCache)[i] + /// ``` + PubkeyCacheMismatch { validator_index: usize }, + /// Invariant 10: block root indices mapping. + /// + /// ```text + /// oldest_block_slot <= i < split.slot + /// -> block_root for slot i in cold_db + /// && block for block_root in hot_db + /// ``` + ColdBlockRootMissing { + slot: Slot, + oldest_block_slot: Slot, + split_slot: Slot, + }, + /// Invariant 10: block root index references a block that must exist. + /// + /// ```text + /// oldest_block_slot <= i < split.slot + /// -> block_root for slot i in cold_db + /// && block for block_root in hot_db + /// ``` + ColdBlockRootOrphan { slot: Slot, block_root: Hash256 }, + /// Invariant 11: state root indices mapping. + /// + /// ```text + /// (i <= state_lower_limit || i >= min(split.slot, state_upper_limit)) && i < split.slot + /// -> i |-> state_root in cold_db(BeaconStateRoots) + /// && state_root |-> cold_state_summary in cold_db(BeaconColdStateSummary) + /// && cold_state_summary.slot == i + /// ``` + ColdStateRootMissing { + slot: Slot, + state_lower_limit: Slot, + state_upper_limit: Slot, + split_slot: Slot, + }, + /// Invariant 11: state root index must have a cold state summary. 
+    ///
+    /// ```text
+    /// (i <= state_lower_limit || i >= min(split.slot, state_upper_limit)) && i < split.slot
+    ///     -> i |-> state_root in cold_db(BeaconStateRoots)
+    ///     && state_root |-> cold_state_summary in cold_db(BeaconColdStateSummary)
+    ///     && cold_state_summary.slot == i
+    /// ```
+    ColdStateRootMissingSummary { slot: Slot, state_root: Hash256 },
+    /// Invariant 11: cold state summary slot must match index slot.
+    ///
+    /// ```text
+    /// (i <= state_lower_limit || i >= min(split.slot, state_upper_limit)) && i < split.slot
+    ///     -> i |-> state_root in cold_db(BeaconStateRoots)
+    ///     && state_root |-> cold_state_summary in cold_db(BeaconColdStateSummary)
+    ///     && cold_state_summary.slot == i
+    /// ```
+    ColdStateRootSlotMismatch {
+        slot: Slot,
+        state_root: Hash256,
+        summary_slot: Slot,
+    },
+    /// Invariant 12: cold state diff consistency.
+    ///
+    /// ```text
+    /// cold_state_summary in cold_db
+    ///     -> slot |-> state diff/snapshot/nothing in cold_db according to diff hierarchy
+    /// ```
+    ColdStateMissingSnapshot { state_root: Hash256, slot: Slot },
+    /// Invariant 12: cold state diff consistency (missing diff).
+    ///
+    /// ```text
+    /// cold_state_summary in cold_db
+    ///     -> slot |-> state diff/snapshot/nothing in cold_db according to diff hierarchy
+    /// ```
+    ColdStateMissingDiff { state_root: Hash256, slot: Slot },
+    /// Invariant 12: DiffFrom/ReplayFrom base slot must reference an existing summary.
+    ///
+    /// ```text
+    /// cold_state_summary in cold_db
+    ///     -> slot |-> state diff/snapshot/nothing in cold_db according to diff hierarchy
+    /// ```
+    ColdStateBaseSummaryMissing { slot: Slot, base_slot: Slot },
+}
+
+impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> {
+    /// Run all database invariant checks.
+    ///
+    /// The `ctx` parameter provides data from the beacon chain layer (fork choice, state cache,
+    /// custody columns, pubkey cache) so that all invariant checks can live in this single file.
+    pub fn check_invariants(&self, ctx: &InvariantContext) -> Result<InvariantCheckResult, Error> {
+        let mut result = InvariantCheckResult::new();
+        let split = self.get_split_info();
+
+        result.merge(self.check_fork_choice_block_consistency(ctx)?);
+        result.merge(self.check_hot_block_invariants(&split, ctx)?);
+        result.merge(self.check_hot_state_summary_diff_consistency()?);
+        result.merge(self.check_hot_state_summary_chain_consistency(&split)?);
+        result.merge(self.check_state_cache_consistency(ctx)?);
+        result.merge(self.check_cold_block_root_indices(&split)?);
+        result.merge(self.check_cold_state_root_indices(&split)?);
+        result.merge(self.check_cold_state_diff_consistency()?);
+        result.merge(self.check_pubkey_cache_consistency(ctx)?);
+
+        Ok(result)
+    }
+
+    /// Invariant 1 (Hot DB): Fork choice block consistency.
+    ///
+    /// ```text
+    /// block in fork_choice && descends_from_finalized -> block in hot_db
+    /// ```
+    ///
+    /// Every canonical fork choice block (descending from finalized) must exist in the hot
+    /// database. Pruned non-canonical fork blocks may linger in the proto-array and are
+    /// excluded from this check.
+    fn check_fork_choice_block_consistency(
+        &self,
+        ctx: &InvariantContext,
+    ) -> Result<InvariantCheckResult, Error> {
+        let mut result = InvariantCheckResult::new();
+
+        for &(block_root, slot) in &ctx.fork_choice_blocks {
+            let exists = self
+                .hot_db
+                .key_exists(DBColumn::BeaconBlock, block_root.as_slice())?;
+            if !exists {
+                result
+                    .add_violation(InvariantViolation::ForkChoiceBlockMissing { block_root, slot });
+            }
+        }
+
+        Ok(result)
+    }
+
+    /// Invariants 2, 5, 6, 7 (Hot DB): Block-related consistency checks.
+ /// + /// Iterates hot DB blocks once and checks: + /// - Invariant 2: block-state summary consistency + /// - Invariant 5: execution payload consistency (when prune_payloads=false) + /// - Invariant 6: blob sidecar consistency (Deneb to Fulu) + /// - Invariant 7: data column consistency (post-Fulu, when custody_columns provided) + fn check_hot_block_invariants( + &self, + split: &Split, + ctx: &InvariantContext, + ) -> Result { + let mut result = InvariantCheckResult::new(); + + let check_payloads = !self.get_config().prune_payloads; + let bellatrix_fork_slot = self + .spec + .bellatrix_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let deneb_fork_slot = self + .spec + .deneb_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let fulu_fork_slot = self + .spec + .fulu_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let gloas_fork_slot = self + .spec + .gloas_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let oldest_blob_slot = self.get_blob_info().oldest_blob_slot; + let oldest_data_column_slot = self.get_data_column_info().oldest_data_column_slot; + + for res in self.hot_db.iter_column::(DBColumn::BeaconBlock) { + let (block_root, block_bytes) = res?; + let block = SignedBlindedBeaconBlock::::from_ssz_bytes(&block_bytes, &self.spec)?; + let slot = block.slot(); + + // Invariant 2: block-state consistency. + if slot >= split.slot { + let state_root = block.state_root(); + let has_summary = self + .hot_db + .key_exists(DBColumn::BeaconStateHotSummary, state_root.as_slice())?; + if !has_summary { + result.add_violation(InvariantViolation::HotBlockMissingStateSummary { + block_root, + slot, + state_root, + }); + } + } + + // Invariant 5: execution payload consistency. 
+ if check_payloads + && let Some(bellatrix_slot) = bellatrix_fork_slot + && slot >= bellatrix_slot + { + if let Some(gloas_slot) = gloas_fork_slot + && slot >= gloas_slot + { + // For Gloas there is never a true payload stored at slot 0. + // TODO(gloas): still need to account for non-canonical payloads once pruning + // is implemented. + if slot != 0 && !self.payload_envelope_exists(&block_root)? { + result.add_violation(InvariantViolation::ExecutionPayloadMissing { + block_root, + slot, + }); + } + } else if !self.execution_payload_exists(&block_root)? { + result.add_violation(InvariantViolation::ExecutionPayloadMissing { + block_root, + slot, + }); + } + } + + // Invariant 6: blob sidecar consistency. + // Only check blocks that actually have blob KZG commitments — blocks with 0 + // commitments legitimately have no blob sidecars stored. + if let Some(deneb_slot) = deneb_fork_slot + && let Some(oldest_blob) = oldest_blob_slot + && slot >= deneb_slot + && slot >= oldest_blob + && fulu_fork_slot.is_none_or(|fulu_slot| slot < fulu_slot) + && block.num_expected_blobs() > 0 + { + let has_blob = self + .blobs_db + .key_exists(DBColumn::BeaconBlob, block_root.as_slice())?; + if !has_blob { + result + .add_violation(InvariantViolation::BlobSidecarMissing { block_root, slot }); + } + } + + // Invariant 7: data column consistency. + // Only check blocks that actually have blob KZG commitments. + // TODO(gloas): reconsider this invariant — non-canonical payloads won't have + // their data column sidecars stored. 
+ if !ctx.custody_columns.is_empty() + && let Some(fulu_slot) = fulu_fork_slot + && let Some(oldest_dc) = oldest_data_column_slot + && slot >= fulu_slot + && slot >= oldest_dc + && block.num_expected_blobs() > 0 + { + let stored_columns = self.get_data_column_keys(block_root)?; + for col_idx in &ctx.custody_columns { + if !stored_columns.contains(col_idx) { + result.add_violation(InvariantViolation::DataColumnMissing { + block_root, + slot, + column_index: *col_idx, + }); + } + } + } + } + + Ok(result) + } + + /// Invariant 3 (Hot DB): State summary diff/snapshot consistency. + /// + /// ```text + /// state_summary in hot_db + /// -> state diff/snapshot/nothing in hot_db per HDiff hierarchy rules + /// ``` + /// + /// Each hot state summary should have the correct storage artifact (snapshot, diff, or + /// nothing) according to the HDiff hierarchy configuration. The hierarchy uses the + /// anchor_slot as its start point for the hot DB. + fn check_hot_state_summary_diff_consistency(&self) -> Result { + let mut result = InvariantCheckResult::new(); + + let anchor_slot = self.get_anchor_info().anchor_slot; + + // Collect all summary slots and their strategies in a first pass. + let mut known_state_roots = HashSet::new(); + let mut base_state_refs: Vec<(Slot, Hash256)> = Vec::new(); + + for res in self + .hot_db + .iter_column::(DBColumn::BeaconStateHotSummary) + { + let (state_root, value) = res?; + let summary = HotStateSummary::from_ssz_bytes(&value)?; + + known_state_roots.insert(state_root); + + match self.hierarchy.storage_strategy(summary.slot, anchor_slot)? 
{ + StorageStrategy::Snapshot => { + let has_snapshot = self + .hot_db + .key_exists(DBColumn::BeaconStateHotSnapshot, state_root.as_slice())?; + if !has_snapshot { + result.add_violation(InvariantViolation::HotStateMissingSnapshot { + state_root, + slot: summary.slot, + }); + } + } + StorageStrategy::DiffFrom(base_slot) => { + let has_diff = self + .hot_db + .key_exists(DBColumn::BeaconStateHotDiff, state_root.as_slice())?; + if !has_diff { + result.add_violation(InvariantViolation::HotStateMissingDiff { + state_root, + slot: summary.slot, + }); + } + if let Ok(base_root) = summary.diff_base_state.get_root(base_slot) { + base_state_refs.push((summary.slot, base_root)); + } + } + StorageStrategy::ReplayFrom(base_slot) => { + if let Ok(base_root) = summary.diff_base_state.get_root(base_slot) { + base_state_refs.push((summary.slot, base_root)); + } + } + } + } + + // Verify that all diff base state roots reference existing summaries. + for (slot, base_state_root) in base_state_refs { + if !known_state_roots.contains(&base_state_root) { + result.add_violation(InvariantViolation::HotStateBaseSummaryMissing { + slot, + base_state_root, + }); + } + } + + Ok(result) + } + + /// Invariant 4 (Hot DB): State summary chain consistency. + /// + /// ```text + /// state_summary in hot_db && state_summary.slot > split.slot + /// -> state_summary for previous_state_root in hot_db + /// ``` + /// + /// The chain of `previous_state_root` links must be continuous back to the split state. + /// The split state itself is the boundary and does not need a predecessor in the hot DB. 
+ fn check_hot_state_summary_chain_consistency( + &self, + split: &Split, + ) -> Result { + let mut result = InvariantCheckResult::new(); + + for res in self + .hot_db + .iter_column::(DBColumn::BeaconStateHotSummary) + { + let (_state_root, value) = res?; + let summary = HotStateSummary::from_ssz_bytes(&value)?; + + if summary.slot > split.slot { + let prev_root = summary.previous_state_root; + let has_prev = self + .hot_db + .key_exists(DBColumn::BeaconStateHotSummary, prev_root.as_slice())?; + if !has_prev { + result.add_violation(InvariantViolation::HotStateMissingPreviousSummary { + slot: summary.slot, + previous_state_root: prev_root, + }); + } + } + } + + Ok(result) + } + + /// Invariant 8 (Hot DB): State cache and disk consistency. + /// + /// ```text + /// state in state_cache -> state_summary in hot_db + /// ``` + /// + /// Every state held in the in-memory state cache (including the finalized state) should + /// have a corresponding hot state summary on disk. + fn check_state_cache_consistency( + &self, + ctx: &InvariantContext, + ) -> Result { + let mut result = InvariantCheckResult::new(); + + for &state_root in &ctx.state_cache_roots { + let has_summary = self + .hot_db + .key_exists(DBColumn::BeaconStateHotSummary, state_root.as_slice())?; + if !has_summary { + result.add_violation(InvariantViolation::StateCacheMissingSummary { state_root }); + } + } + + Ok(result) + } + + /// Invariant 10 (Cold DB): Block root indices. + /// + /// ```text + /// oldest_block_slot <= i < split.slot + /// -> block_root for slot i in cold_db + /// && block for block_root in hot_db + /// ``` + /// + /// Every slot in the cold range (from `oldest_block_slot` to `split.slot`) should have a + /// block root index entry, and the referenced block should exist in the hot DB. Note that + /// skip slots store the most recent non-skipped block's root, so `block.slot()` may differ + /// from the index slot. 
+ fn check_cold_block_root_indices(&self, split: &Split) -> Result { + let mut result = InvariantCheckResult::new(); + + let anchor_info = self.get_anchor_info(); + + if anchor_info.oldest_block_slot >= split.slot { + return Ok(result); + } + + for slot_val in anchor_info.oldest_block_slot.as_u64()..split.slot.as_u64() { + let slot = Slot::new(slot_val); + + let slot_bytes = slot_val.to_be_bytes(); + let block_root_bytes = self + .cold_db + .get_bytes(DBColumn::BeaconBlockRoots, &slot_bytes)?; + + let Some(root_bytes) = block_root_bytes else { + result.add_violation(InvariantViolation::ColdBlockRootMissing { + slot, + oldest_block_slot: anchor_info.oldest_block_slot, + split_slot: split.slot, + }); + continue; + }; + + if root_bytes.len() != 32 { + return Err(Error::InvalidKey(format!( + "cold block root at slot {slot} has invalid length {}", + root_bytes.len() + ))); + } + + let block_root = Hash256::from_slice(&root_bytes); + let block_exists = self + .hot_db + .key_exists(DBColumn::BeaconBlock, block_root.as_slice())?; + if !block_exists { + result.add_violation(InvariantViolation::ColdBlockRootOrphan { slot, block_root }); + } + } + + Ok(result) + } + + /// Invariant 11 (Cold DB): State root indices. + /// + /// ```text + /// (i <= state_lower_limit || i >= min(split.slot, state_upper_limit)) && i < split.slot + /// -> i |-> state_root in cold_db(BeaconStateRoots) + /// && state_root |-> cold_state_summary in cold_db(BeaconColdStateSummary) + /// && cold_state_summary.slot == i + /// ``` + fn check_cold_state_root_indices(&self, split: &Split) -> Result { + let mut result = InvariantCheckResult::new(); + + let anchor_info = self.get_anchor_info(); + + // Expected slots are: (i <= state_lower_limit || i >= effective_upper) && i < split.slot + // where effective_upper = min(split.slot, state_upper_limit). 
+ for slot_val in 0..split.slot.as_u64() { + let slot = Slot::new(slot_val); + + if slot <= anchor_info.state_lower_limit + || slot >= cmp::min(split.slot, anchor_info.state_upper_limit) + { + let slot_bytes = slot_val.to_be_bytes(); + let Some(root_bytes) = self + .cold_db + .get_bytes(DBColumn::BeaconStateRoots, &slot_bytes)? + else { + result.add_violation(InvariantViolation::ColdStateRootMissing { + slot, + state_lower_limit: anchor_info.state_lower_limit, + state_upper_limit: anchor_info.state_upper_limit, + split_slot: split.slot, + }); + continue; + }; + + if root_bytes.len() != 32 { + return Err(Error::InvalidKey(format!( + "cold state root at slot {slot} has invalid length {}", + root_bytes.len() + ))); + } + + let state_root = Hash256::from_slice(&root_bytes); + + match self + .cold_db + .get_bytes(DBColumn::BeaconColdStateSummary, state_root.as_slice())? + { + None => { + result.add_violation(InvariantViolation::ColdStateRootMissingSummary { + slot, + state_root, + }); + } + Some(summary_bytes) => { + let summary = ColdStateSummary::from_ssz_bytes(&summary_bytes)?; + if summary.slot != slot { + result.add_violation(InvariantViolation::ColdStateRootSlotMismatch { + slot, + state_root, + summary_slot: summary.slot, + }); + } + } + } + } + } + + Ok(result) + } + + /// Invariant 12 (Cold DB): Cold state diff/snapshot consistency. + /// + /// ```text + /// cold_state_summary in cold_db + /// -> state diff/snapshot/nothing in cold_db per HDiff hierarchy rules + /// ``` + /// + /// Each cold state summary should have the correct storage artifact according to the + /// HDiff hierarchy. Cold states always use genesis (slot 0) as the hierarchy start since + /// they are finalized and have no anchor_slot dependency. 
+ fn check_cold_state_diff_consistency(&self) -> Result { + let mut result = InvariantCheckResult::new(); + + let mut summary_slots = HashSet::new(); + let mut base_slot_refs = Vec::new(); + + for res in self + .cold_db + .iter_column::(DBColumn::BeaconColdStateSummary) + { + let (state_root, value) = res?; + let summary = ColdStateSummary::from_ssz_bytes(&value)?; + + summary_slots.insert(summary.slot); + + let slot_bytes = summary.slot.as_u64().to_be_bytes(); + + match self + .hierarchy + .storage_strategy(summary.slot, Slot::new(0))? + { + StorageStrategy::Snapshot => { + let has_snapshot = self + .cold_db + .key_exists(DBColumn::BeaconStateSnapshot, &slot_bytes)?; + if !has_snapshot { + result.add_violation(InvariantViolation::ColdStateMissingSnapshot { + state_root, + slot: summary.slot, + }); + } + } + StorageStrategy::DiffFrom(base_slot) => { + let has_diff = self + .cold_db + .key_exists(DBColumn::BeaconStateDiff, &slot_bytes)?; + if !has_diff { + result.add_violation(InvariantViolation::ColdStateMissingDiff { + state_root, + slot: summary.slot, + }); + } + base_slot_refs.push((summary.slot, base_slot)); + } + StorageStrategy::ReplayFrom(base_slot) => { + base_slot_refs.push((summary.slot, base_slot)); + } + } + } + + // Verify that all DiffFrom/ReplayFrom base slots reference existing summaries. + for (slot, base_slot) in base_slot_refs { + if !summary_slots.contains(&base_slot) { + result.add_violation(InvariantViolation::ColdStateBaseSummaryMissing { + slot, + base_slot, + }); + } + } + + Ok(result) + } + + /// Invariant 9 (Hot DB): Pubkey cache consistency. + /// + /// ```text + /// all validator pubkeys from states are in hot_db(PubkeyCache) + /// ``` + /// + /// Checks that the in-memory pubkey cache and the on-disk PubkeyCache column have the same + /// number of entries AND that each pubkey matches at every validator index. 
+ fn check_pubkey_cache_consistency( + &self, + ctx: &InvariantContext, + ) -> Result { + let mut result = InvariantCheckResult::new(); + + // Read on-disk pubkeys by sequential validator index (matching how they are stored + // with Hash256::from_low_u64_be(index) as key). + // Iterate in-memory pubkeys and verify each matches on disk. + for (validator_index, in_memory_bytes) in ctx.pubkey_cache_pubkeys.iter().enumerate() { + let mut key = [0u8; 32]; + key[24..].copy_from_slice(&(validator_index as u64).to_be_bytes()); + match self.hot_db.get_bytes(DBColumn::PubkeyCache, &key)? { + Some(on_disk_bytes) if in_memory_bytes != &on_disk_bytes => { + result + .add_violation(InvariantViolation::PubkeyCacheMismatch { validator_index }); + } + None => { + result + .add_violation(InvariantViolation::PubkeyCacheMissing { validator_index }); + } + _ => {} + } + } + + Ok(result) + } +} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 3363eb800c..bd8caa3ad5 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -15,6 +15,7 @@ pub mod hdiff; pub mod historic_state_cache; pub mod hot_cold_store; mod impls; +pub mod invariants; mod memory_store; pub mod metadata; pub mod metrics; @@ -76,11 +77,7 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { fn compact(&self) -> Result<(), Error> { // Compact state and block related columns as they are likely to have the most churn, // i.e. entries being created and deleted. 
- for column in [ - DBColumn::BeaconState, - DBColumn::BeaconStateHotSummary, - DBColumn::BeaconBlock, - ] { + for column in [DBColumn::BeaconStateHotSummary, DBColumn::BeaconBlock] { self.compact_column(column)?; } Ok(()) diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 86860ac5f8..d016922ade 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -111,6 +111,19 @@ impl StateCache { self.hdiff_buffers.mem_usage() } + /// Return all state roots currently held in the cache, including the finalized state. + pub fn state_roots(&self) -> Vec { + let mut roots: Vec = self + .states + .iter() + .map(|(&state_root, _)| state_root) + .collect(); + if let Some(ref finalized) = self.finalized_state { + roots.push(finalized.state_root); + } + roots + } + pub fn update_finalized_state( &mut self, state_root: Hash256, @@ -332,7 +345,12 @@ impl StateCache { } pub fn delete_block_states(&mut self, block_root: &Hash256) { - if let Some(slot_map) = self.block_map.delete_block_states(block_root) { + let (pending_state_roots, full_state_roots) = + self.block_map.delete_block_states(block_root); + for slot_map in [pending_state_roots, full_state_roots] + .into_iter() + .flatten() + { for state_root in slot_map.slots.values() { self.states.pop(state_root); } @@ -443,11 +461,12 @@ impl BlockMap { }); } - fn delete_block_states(&mut self, block_root: &Hash256) -> Option { - // TODO(gloas): update return type - self.blocks + fn delete_block_states(&mut self, block_root: &Hash256) -> (Option, Option) { + let pending_state_roots = self + .blocks .remove(&(*block_root, StatePayloadStatus::Pending)); - self.blocks.remove(&(*block_root, StatePayloadStatus::Full)) + let full_state_roots = self.blocks.remove(&(*block_root, StatePayloadStatus::Full)); + (pending_state_roots, full_state_roots) } } diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index 0442bf4ec0..c2e4fbdd5a 100644 --- 
a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -512,180 +512,6 @@ As all testnets and Mainnet have been merged, both values will be the same after } ``` -## `/lighthouse/analysis/attestation_performance/{index}` - -Fetch information about the attestation performance of a validator index or all validators for a -range of consecutive epochs. - -Two query parameters are required: - -- `start_epoch` (inclusive): the first epoch to compute attestation performance for. -- `end_epoch` (inclusive): the final epoch to compute attestation performance for. - -Example: - -```bash -curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/1?start_epoch=1&end_epoch=1" | jq -``` - -```json -[ - { - "index": 1, - "epochs": { - "1": { - "active": true, - "head": true, - "target": true, - "source": true, - "delay": 1 - } - } - } -] -``` - -Instead of specifying a validator index, you can specify the entire validator set by using `global`: - -```bash -curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/global?start_epoch=1&end_epoch=1" | jq -``` - -```json -[ - { - "index": 0, - "epochs": { - "1": { - "active": true, - "head": true, - "target": true, - "source": true, - "delay": 1 - } - } - }, - { - "index": 1, - "epochs": { - "1": { - "active": true, - "head": true, - "target": true, - "source": true, - "delay": 1 - } - } - }, - { - .. - } -] - -``` - -Caveats: - -- For maximum efficiency the start_epoch should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. - This is because the state *prior* to the `start_epoch` needs to be loaded from the database, - and loading a state on a boundary is most efficient. - -## `/lighthouse/analysis/block_rewards` - -Fetch information about the block rewards paid to proposers for a range of consecutive blocks. - -Two query parameters are required: - -- `start_slot` (inclusive): the slot of the first block to compute rewards for. 
-- `end_slot` (inclusive): the slot of the last block to compute rewards for. - -Example: - -```bash -curl -X GET "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=1" | jq -``` - -The first few lines of the response would look like: - -```json -[ - { - "total": 637260, - "block_root": "0x4a089c5e390bb98e66b27358f157df825128ea953cee9d191229c0bcf423a4f6", - "meta": { - "slot": "1", - "parent_slot": "0", - "proposer_index": 93, - "graffiti": "EF #vm-eth2-raw-iron-101" - }, - "attestation_rewards": { - "total": 637260, - "prev_epoch_total": 0, - "curr_epoch_total": 637260, - "per_attestation_rewards": [ - { - "50102": 780, - } - ] - } - } -] -``` - -Caveats: - -- Presently only attestation and sync committee rewards are computed. -- The output format is verbose and subject to change. Please see [`BlockReward`][block_reward_src] - in the source. -- For maximum efficiency the `start_slot` should satisfy `start_slot % slots_per_restore_point == 1`. - This is because the state *prior* to the `start_slot` needs to be loaded from the database, and - loading a state on a boundary is most efficient. - -[block_reward_src]: -https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs - -## `/lighthouse/analysis/block_packing` - -Fetch information about the block packing efficiency of blocks for a range of consecutive -epochs. - -Two query parameters are required: - -- `start_epoch` (inclusive): the epoch of the first block to compute packing efficiency for. -- `end_epoch` (inclusive): the epoch of the last block to compute packing efficiency for. 
- -```bash -curl -X GET "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=1&end_epoch=1" | jq -``` - -An excerpt of the response looks like: - -```json -[ - { - "slot": "33", - "block_hash": "0xb20970bb97c6c6de6b1e2b689d6381dd15b3d3518fbaee032229495f963bd5da", - "proposer_info": { - "validator_index": 855, - "graffiti": "poapZoJ7zWNfK7F3nWjEausWVBvKa6gA" - }, - "available_attestations": 3805, - "included_attestations": 1143, - "prior_skip_slots": 1 - }, - { - .. - } -] -``` - -Caveats: - -- `start_epoch` must not be `0`. -- For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. - This is because the state *prior* to the `start_epoch` needs to be loaded from the database, and - loading a state on a boundary is most efficient. - ## `/lighthouse/logs` This is a Server Side Event subscription endpoint. This allows a user to read diff --git a/common/eip_3076/Cargo.toml b/common/eip_3076/Cargo.toml index 058e1fd1a0..157fe12cb3 100644 --- a/common/eip_3076/Cargo.toml +++ b/common/eip_3076/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } [features] default = [] -arbitrary-fuzz = ["dep:arbitrary", "types/arbitrary"] +arbitrary = ["dep:arbitrary", "types/arbitrary"] json = ["dep:serde_json"] [dependencies] diff --git a/common/eip_3076/src/lib.rs b/common/eip_3076/src/lib.rs index cdd05d7b1e..0bf1a94d0e 100644 --- a/common/eip_3076/src/lib.rs +++ b/common/eip_3076/src/lib.rs @@ -13,7 +13,7 @@ pub enum Error { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeMetadata { #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub interchange_format_version: u64, @@ -22,7 +22,7 @@ pub struct InterchangeMetadata { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, 
Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeData { pub pubkey: PublicKeyBytes, pub signed_blocks: Vec, @@ -31,7 +31,7 @@ pub struct InterchangeData { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedBlock { #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub slot: Slot, @@ -41,7 +41,7 @@ pub struct SignedBlock { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedAttestation { #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub source_epoch: Epoch, @@ -52,7 +52,7 @@ pub struct SignedAttestation { } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct Interchange { pub metadata: InterchangeMetadata, pub data: Vec, diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index ac96da6173..40c5ef58a6 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -898,6 +898,47 @@ impl BeaconNodeHttpClient { .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } + /// `GET beacon/states/{state_id}/proposer_lookahead` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_proposer_lookahead( + &self, + state_id: StateId, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("proposer_lookahead"); + + self.get_fork_contextual(path, |fork| fork) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) + } + + /// `GET beacon/states/{state_id}/proposer_lookahead` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_proposer_lookahead_ssz( + &self, + state_id: StateId, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("proposer_lookahead"); + + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.default) + .await + } + /// `GET beacon/light_client/updates` /// /// Returns `Ok(None)` on a 404 error. @@ -1761,7 +1802,7 @@ impl BeaconNodeHttpClient { &self, block_id: BlockId, validators: &[ValidatorId], - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1778,7 +1819,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_rewards_blocks( &self, block_id: BlockId, - ) -> Result, Error> { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1796,7 +1837,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, validators: &[ValidatorId], - ) -> Result { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -2144,6 +2185,24 @@ impl BeaconNodeHttpClient { .await } + /// `GET v2/validator/duties/proposer/{epoch}` + pub async fn get_validator_duties_proposer_v2( + &self, + epoch: Epoch, + ) -> Result>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("duties") + .push("proposer") + .push(&epoch.to_string()); + + self.get_with_timeout(path, self.timeouts.proposer_duties) + .await + } + /// `GET v2/validator/blocks/{slot}` pub async fn get_validator_blocks( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 993c263cbf..5ff7a7e0f0 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,13 +1,10 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. -mod attestation_performance; -mod block_packing_efficiency; -mod block_rewards; mod custody; pub mod sync_state; use crate::{ - BeaconNodeHttpClient, DepositData, Error, Hash256, Slot, + BeaconNodeHttpClient, DepositData, Error, Hash256, lighthouse::sync_state::SyncState, types::{AdminPeer, Epoch, GenericResponse, ValidatorId}, }; @@ -16,13 +13,6 @@ use serde::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; -pub use attestation_performance::{ - AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, -}; -pub use block_packing_efficiency::{ - BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, -}; -pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use custody::CustodyInfo; // Define "legacy" implementations of `Option` which use four bytes for encoding the union @@ -312,73 +302,4 @@ impl BeaconNodeHttpClient { self.post_with_response(path, &req).await } - - /* - Analysis endpoints. - */ - - /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot - pub async fn get_lighthouse_analysis_block_rewards( - &self, - start_slot: Slot, - end_slot: Slot, - ) -> Result, Error> { - let mut path = self.server.expose_full().clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("lighthouse") - .push("analysis") - .push("block_rewards"); - - path.query_pairs_mut() - .append_pair("start_slot", &start_slot.to_string()) - .append_pair("end_slot", &end_slot.to_string()); - - self.get(path).await - } - - /// `GET` lighthouse/analysis/block_packing?start_epoch,end_epoch - pub async fn get_lighthouse_analysis_block_packing( - &self, - start_epoch: Epoch, - end_epoch: Epoch, - ) -> Result, Error> { - let mut path = self.server.expose_full().clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("analysis") - .push("block_packing_efficiency"); - - path.query_pairs_mut() - .append_pair("start_epoch", &start_epoch.to_string()) - .append_pair("end_epoch", &end_epoch.to_string()); - - self.get(path).await - } - - /// `GET` lighthouse/analysis/attestation_performance/{index}?start_epoch,end_epoch - pub async fn get_lighthouse_analysis_attestation_performance( - &self, - start_epoch: Epoch, - end_epoch: Epoch, - target: String, - ) -> Result, Error> { - let mut path = self.server.expose_full().clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("lighthouse") - .push("analysis") - .push("attestation_performance") - .push(&target); - - path.query_pairs_mut() - .append_pair("start_epoch", &start_epoch.to_string()) - .append_pair("end_epoch", &end_epoch.to_string()); - - self.get(path).await - } } diff --git a/common/eth2/src/lighthouse/attestation_performance.rs b/common/eth2/src/lighthouse/attestation_performance.rs deleted file mode 100644 index 5ce1d90a38..0000000000 --- a/common/eth2/src/lighthouse/attestation_performance.rs +++ /dev/null @@ -1,39 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use types::Epoch; - -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct AttestationPerformanceStatistics { - pub active: bool, - pub head: bool, - pub target: bool, - pub source: bool, - #[serde(skip_serializing_if = "Option::is_none")] - pub delay: Option, -} - -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct AttestationPerformance { - pub index: u64, - pub epochs: HashMap, -} - -impl AttestationPerformance { - pub fn initialize(indices: Vec) -> Vec { - let mut vec = Vec::with_capacity(indices.len()); - for index in indices { - vec.push(Self { - index, - ..Default::default() - }) - } - vec - } -} - -/// Query parameters for the `/lighthouse/analysis/attestation_performance` endpoint. 
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct AttestationPerformanceQuery { - pub start_epoch: Epoch, - pub end_epoch: Epoch, -} diff --git a/common/eth2/src/lighthouse/block_packing_efficiency.rs b/common/eth2/src/lighthouse/block_packing_efficiency.rs deleted file mode 100644 index 0ad6f46031..0000000000 --- a/common/eth2/src/lighthouse/block_packing_efficiency.rs +++ /dev/null @@ -1,34 +0,0 @@ -use serde::{Deserialize, Serialize}; -use types::{Epoch, Hash256, Slot}; - -type CommitteePosition = usize; -type Committee = u64; -type ValidatorIndex = u64; - -#[derive(Debug, Default, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)] -pub struct UniqueAttestation { - pub slot: Slot, - pub committee_index: Committee, - pub committee_position: CommitteePosition, -} -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct ProposerInfo { - pub validator_index: ValidatorIndex, - pub graffiti: String, -} - -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockPackingEfficiency { - pub slot: Slot, - pub block_hash: Hash256, - pub proposer_info: ProposerInfo, - pub available_attestations: usize, - pub included_attestations: usize, - pub prior_skip_slots: u64, -} - -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockPackingEfficiencyQuery { - pub start_epoch: Epoch, - pub end_epoch: Epoch, -} diff --git a/common/eth2/src/lighthouse/block_rewards.rs b/common/eth2/src/lighthouse/block_rewards.rs deleted file mode 100644 index 38070f3539..0000000000 --- a/common/eth2/src/lighthouse/block_rewards.rs +++ /dev/null @@ -1,60 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use types::{AttestationData, Hash256, Slot}; - -/// Details about the rewards paid to a block proposer for proposing a block. -/// -/// All rewards in GWei. 
-/// -/// Presently this only counts attestation rewards, but in future should be expanded -/// to include information on slashings and sync committee aggregates too. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockReward { - /// Sum of all reward components. - pub total: u64, - /// Block root of the block that these rewards are for. - pub block_root: Hash256, - /// Metadata about the block, particularly reward-relevant metadata. - pub meta: BlockRewardMeta, - /// Rewards due to attestations. - pub attestation_rewards: AttestationRewards, - /// Sum of rewards due to sync committee signatures. - pub sync_committee_rewards: u64, -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockRewardMeta { - pub slot: Slot, - pub parent_slot: Slot, - pub proposer_index: u64, - pub graffiti: String, -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct AttestationRewards { - /// Total block reward from attestations included. - pub total: u64, - /// Total rewards from previous epoch attestations. - pub prev_epoch_total: u64, - /// Total rewards from current epoch attestations. - pub curr_epoch_total: u64, - /// Vec of attestation rewards for each attestation included. - /// - /// Each element of the vec is a map from validator index to reward. - pub per_attestation_rewards: Vec>, - /// The attestations themselves (optional). - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub attestations: Vec, -} - -/// Query parameters for the `/lighthouse/block_rewards` endpoint. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockRewardsQuery { - /// Lower slot limit for block rewards returned (inclusive). - pub start_slot: Slot, - /// Upper slot limit for block rewards returned (inclusive). - pub end_slot: Slot, - /// Include the full attestations themselves? 
- #[serde(default)] - pub include_attestations: bool, -} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index a3785b6ea6..4eef3e3faa 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -37,9 +37,6 @@ pub mod beacon_response { pub use crate::beacon_response::*; } -#[cfg(feature = "lighthouse")] -use crate::lighthouse::BlockReward; - // Re-export error types from the unified error module pub use crate::error::{ErrorMessage, Failure, IndexedErrorMessage, ResponseError as Error}; @@ -711,6 +708,15 @@ pub struct DataColumnIndicesQuery { #[serde(transparent)] pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec); +impl<'de, T> ContextDeserialize<'de, T> for ValidatorIndexData { + fn context_deserialize(deserializer: D, _context: T) -> Result + where + D: Deserializer<'de>, + { + Self::deserialize(deserializer) + } +} + /// Borrowed variant of `ValidatorIndexData`, for serializing/sending. #[derive(Clone, Copy, Serialize)] #[serde(transparent)] @@ -1205,8 +1211,6 @@ pub enum EventKind { LateHead(SseLateHead), LightClientFinalityUpdate(Box>>), LightClientOptimisticUpdate(Box>>), - #[cfg(feature = "lighthouse")] - BlockReward(BlockReward), PayloadAttributes(VersionedSsePayloadAttributes), ProposerSlashing(Box), AttesterSlashing(Box>), @@ -1233,8 +1237,6 @@ impl EventKind { EventKind::LateHead(_) => "late_head", EventKind::LightClientFinalityUpdate(_) => "light_client_finality_update", EventKind::LightClientOptimisticUpdate(_) => "light_client_optimistic_update", - #[cfg(feature = "lighthouse")] - EventKind::BlockReward(_) => "block_reward", EventKind::ProposerSlashing(_) => "proposer_slashing", EventKind::AttesterSlashing(_) => "attester_slashing", EventKind::BlsToExecutionChange(_) => "bls_to_execution_change", @@ -1312,10 +1314,6 @@ impl EventKind { })?), ))) } - #[cfg(feature = "lighthouse")] - "block_reward" => Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( - |e| 
ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), - )?)), "attester_slashing" => Ok(EventKind::AttesterSlashing( serde_json::from_str(data).map_err(|e| { ServerError::InvalidServerSentEvent(format!("Attester Slashing: {:?}", e)) @@ -1378,8 +1376,6 @@ pub enum EventTopic { PayloadAttributes, LightClientFinalityUpdate, LightClientOptimisticUpdate, - #[cfg(feature = "lighthouse")] - BlockReward, AttesterSlashing, ProposerSlashing, BlsToExecutionChange, @@ -1407,8 +1403,6 @@ impl FromStr for EventTopic { "late_head" => Ok(EventTopic::LateHead), "light_client_finality_update" => Ok(EventTopic::LightClientFinalityUpdate), "light_client_optimistic_update" => Ok(EventTopic::LightClientOptimisticUpdate), - #[cfg(feature = "lighthouse")] - "block_reward" => Ok(EventTopic::BlockReward), "attester_slashing" => Ok(EventTopic::AttesterSlashing), "proposer_slashing" => Ok(EventTopic::ProposerSlashing), "bls_to_execution_change" => Ok(EventTopic::BlsToExecutionChange), @@ -1437,8 +1431,6 @@ impl fmt::Display for EventTopic { EventTopic::LateHead => write!(f, "late_head"), EventTopic::LightClientFinalityUpdate => write!(f, "light_client_finality_update"), EventTopic::LightClientOptimisticUpdate => write!(f, "light_client_optimistic_update"), - #[cfg(feature = "lighthouse")] - EventTopic::BlockReward => write!(f, "block_reward"), EventTopic::AttesterSlashing => write!(f, "attester_slashing"), EventTopic::ProposerSlashing => write!(f, "proposer_slashing"), EventTopic::BlsToExecutionChange => write!(f, "bls_to_execution_change"), diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index f0c04d891a..e1eb022cc9 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -49,7 +49,7 @@ ELECTRA_FORK_VERSION: 0x0500006f ELECTRA_FORK_EPOCH: 948224 # Thu Mar 6 2025 
09:43:40 GMT+0000 # Fulu FULU_FORK_VERSION: 0x0600006f -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 1353216 # Mon Mar 16 2026 09:33:00 UTC # Gloas GLOAS_FORK_VERSION: 0x0700006f GLOAS_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 34313aa393..d27f7a09e8 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -46,7 +46,7 @@ ELECTRA_FORK_VERSION: 0x05000064 ELECTRA_FORK_EPOCH: 1337856 # 2025-04-30T14:03:40.000Z # Fulu FULU_FORK_VERSION: 0x06000064 -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 1714688 # Tue Apr 14 2026 12:06:20 GMT+0000 # Gloas GLOAS_FORK_VERSION: 0x07000064 GLOAS_FORK_EPOCH: 18446744073709551615 @@ -156,6 +156,11 @@ NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +VALIDATOR_CUSTODY_REQUIREMENT: 8 +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +# `2**14` (= 16384 epochs, ~15 days) +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 16384 MAX_BLOBS_PER_BLOCK_FULU: 12 # Gloas \ No newline at end of file diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 41c82dbd61..1606b8ceb4 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -5,7 +5,8 @@ authors = ["blacktemplar "] edition = { workspace = true } [features] -test_logger = [] # Print log output to stderr when running tests instead of dropping it +# Print log output to stderr when running tests instead of dropping it. 
+test_logger = [] [dependencies] chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } @@ -13,7 +14,7 @@ logroller = { workspace = true } metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -tokio = { workspace = true, features = [ "time" ] } +tokio = { workspace = true, features = ["time"] } tracing = { workspace = true } tracing-appender = { workspace = true } tracing-core = { workspace = true } diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 1052128852..e90490bf09 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -35,7 +35,4 @@ tikv-jemallocator = { version = "0.6.0", optional = true, features = ["stats"] } # Jemalloc's background_threads feature requires Linux (pthreads). [target.'cfg(target_os = "linux")'.dependencies] -tikv-jemallocator = { version = "0.6.0", optional = true, features = [ - "stats", - "background_threads", -] } +tikv-jemallocator = { version = "0.6.0", optional = true, features = ["stats", "background_threads"] } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 10659b9b3a..299291e079 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -6,7 +6,6 @@ use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; -use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, @@ -1553,47 +1552,17 @@ where /// /// This is used when persisting the state of the fork choice to disk. 
#[superstruct( - variants(V17, V28), + variants(V28), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] pub struct PersistedForkChoice { - #[superstruct(only(V17))] - pub proto_array_bytes: Vec, - #[superstruct(only(V28))] pub proto_array: proto_array::core::SszContainerV28, pub queued_attestations: Vec, } pub type PersistedForkChoice = PersistedForkChoiceV28; -impl TryFrom for PersistedForkChoiceV28 { - type Error = ssz::DecodeError; - - fn try_from(v17: PersistedForkChoiceV17) -> Result { - let container_v17 = - proto_array::core::SszContainerV17::from_ssz_bytes(&v17.proto_array_bytes)?; - let container_v28 = container_v17.into(); - - Ok(Self { - proto_array: container_v28, - queued_attestations: v17.queued_attestations, - }) - } -} - -impl From<(PersistedForkChoiceV28, JustifiedBalances)> for PersistedForkChoiceV17 { - fn from((v28, balances): (PersistedForkChoiceV28, JustifiedBalances)) -> Self { - let container_v17 = proto_array::core::SszContainerV17::from((v28.proto_array, balances)); - let proto_array_bytes = container_v17.as_ssz_bytes(); - - Self { - proto_array_bytes, - queued_attestations: v28.queued_attestations, - } - } -} - #[cfg(test)] mod tests { use types::MainnetEthSpec; diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index afe06dee1b..8cf2936db4 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -5,7 +5,7 @@ mod metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV17, PersistedForkChoiceV28, QueuedAttestation, ResetPayloadStatuses, + PersistedForkChoiceV28, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 964e836d91..04e57d791b 100644 --- 
a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -16,5 +16,5 @@ pub use error::Error; pub mod core { pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; pub use super::proto_array_fork_choice::VoteTracker; - pub use super::ssz_container::{SszContainer, SszContainerV17, SszContainerV28}; + pub use super::ssz_container::{SszContainer, SszContainerV28}; } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 1e01b74c8c..42696256f7 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -17,14 +17,12 @@ four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); pub type SszContainer = SszContainerV28; #[superstruct( - variants(V17, V28), + variants(V28), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] pub struct SszContainer { pub votes: Vec, - #[superstruct(only(V17))] - pub balances: Vec, pub prune_threshold: usize, // Deprecated, remove in a future schema migration justified_checkpoint: Checkpoint, @@ -73,34 +71,3 @@ impl TryFrom<(SszContainer, JustifiedBalances)> for ProtoArrayForkChoice { }) } } - -// Convert V17 to V28 by dropping balances. -impl From for SszContainerV28 { - fn from(v17: SszContainerV17) -> Self { - Self { - votes: v17.votes, - prune_threshold: v17.prune_threshold, - justified_checkpoint: v17.justified_checkpoint, - finalized_checkpoint: v17.finalized_checkpoint, - nodes: v17.nodes, - indices: v17.indices, - previous_proposer_boost: v17.previous_proposer_boost, - } - } -} - -// Convert V28 to V17 by re-adding balances. 
-impl From<(SszContainerV28, JustifiedBalances)> for SszContainerV17 { - fn from((v28, balances): (SszContainerV28, JustifiedBalances)) -> Self { - Self { - votes: v28.votes, - balances: balances.effective_balances.clone(), - prune_threshold: v28.prune_threshold, - justified_checkpoint: v28.justified_checkpoint, - finalized_checkpoint: v28.finalized_checkpoint, - nodes: v28.nodes, - indices: v28.indices, - previous_proposer_boost: v28.previous_proposer_boost, - } - } -} diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 7426995439..ae0af03231 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -7,10 +7,10 @@ edition = { workspace = true } [features] default = [] fake_crypto = ["bls/fake_crypto"] -arbitrary-fuzz = [ +arbitrary = [ "dep:arbitrary", "smallvec/arbitrary", - "types/arbitrary-fuzz", + "types/arbitrary", "merkle_proof/arbitrary", "ethereum_ssz/arbitrary", "ssz_types/arbitrary", diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 93d0313867..f5f06d1cb9 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -51,6 +51,7 @@ pub struct BlockReplayer< /// Pre-Gloas, this is all states. Post-Gloas, this is *just* the states corresponding to beacon /// blocks. For states corresponding to payloads, we read the state root from the payload /// envelope. + // TODO(gloas): this concept might need adjusting when we implement the cold DB. pub(crate) state_root_iter: Option>, state_root_miss: bool, /// The payload status of the state desired as the end result of block replay. @@ -312,6 +313,7 @@ where // indicates that the parent is full (and it hasn't already been applied). 
state_root = if block.fork_name_unchecked().gloas_enabled() && self.state.slot() == self.state.latest_block_header().slot + && self.state.payload_status() == StatePayloadStatus::Pending { let latest_bid_block_hash = self .state diff --git a/consensus/state_processing/src/envelope_processing.rs b/consensus/state_processing/src/envelope_processing.rs index 1e3c54f1e1..97953b835f 100644 --- a/consensus/state_processing/src/envelope_processing.rs +++ b/consensus/state_processing/src/envelope_processing.rs @@ -21,7 +21,7 @@ macro_rules! envelope_verify { } /// The strategy to be used when validating the payloads state root. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Clone, Copy)] pub enum VerifyStateRoot { /// Validate state root. diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 037e1c7cc7..5aa610e98e 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -55,12 +55,12 @@ use crate::common::update_progressive_balances_cache::{ initialize_progressive_balances_cache, update_progressive_balances_metrics, }; use crate::epoch_cache::initialize_epoch_cache; -#[cfg(feature = "arbitrary-fuzz")] +#[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use tracing::instrument; /// The strategy to be used when validating the block's signatures. -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(PartialEq, Clone, Copy, Debug)] pub enum BlockSignatureStrategy { /// Do not validate any signature. Use with caution. @@ -74,7 +74,7 @@ pub enum BlockSignatureStrategy { } /// The strategy to be used when validating the block's signatures. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(PartialEq, Clone, Copy)] pub enum VerifySignatures { /// Validate all signatures encountered. @@ -90,7 +90,7 @@ impl VerifySignatures { } /// Control verification of the latest block header. -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(PartialEq, Clone, Copy)] pub enum VerifyBlockRoot { True, diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9743812632..ac64398655 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -4,7 +4,10 @@ use crate::common::{ get_attestation_participation_flag_indices, increase_balance, initiate_validator_exit, slash_validator, }; -use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; +use crate::per_block_processing::builder::{ + convert_validator_index_to_builder_index, is_builder_index, +}; +use crate::per_block_processing::errors::{BlockProcessingError, ExitInvalid, IntoWithIndex}; use crate::per_block_processing::verify_payload_attestation::verify_payload_attestation; use bls::{PublicKeyBytes, SignatureBytes}; use ssz_types::FixedVector; @@ -507,7 +510,26 @@ pub fn process_exits( // Verify and apply each exit in series. We iterate in series because higher-index exits may // become invalid due to the application of lower-index ones. for (i, exit) in voluntary_exits.iter().enumerate() { - verify_exit(state, None, exit, verify_signatures, spec) + // Exits must specify an epoch when they become valid; they are not valid before then. 
+ let current_epoch = state.current_epoch(); + if current_epoch < exit.message.epoch { + return Err(BlockOperationError::invalid(ExitInvalid::FutureEpoch { + state: current_epoch, + exit: exit.message.epoch, + }) + .into_with_index(i)); + } + + // [New in Gloas:EIP7732] + if state.fork_name_unchecked().gloas_enabled() + && is_builder_index(exit.message.validator_index) + { + process_builder_voluntary_exit(state, exit, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; + continue; + } + + verify_exit(state, Some(current_epoch), exit, verify_signatures, spec) .map_err(|e| e.into_with_index(i))?; initiate_validator_exit(state, exit.message.validator_index as usize, spec)?; @@ -515,6 +537,87 @@ pub fn process_exits( Ok(()) } +/// Process a builder voluntary exit. [New in Gloas:EIP7732] +fn process_builder_voluntary_exit( + state: &mut BeaconState, + signed_exit: &SignedVoluntaryExit, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockOperationError> { + let builder_index = + convert_validator_index_to_builder_index(signed_exit.message.validator_index); + + let builder = state + .builders()? 
+ .get(builder_index as usize) + .cloned() + .ok_or(BlockOperationError::invalid(ExitInvalid::ValidatorUnknown( + signed_exit.message.validator_index, + )))?; + + // Verify the builder is active + let finalized_epoch = state.finalized_checkpoint().epoch; + if !builder.is_active_at_finalized_epoch(finalized_epoch, spec) { + return Err(BlockOperationError::invalid(ExitInvalid::NotActive( + signed_exit.message.validator_index, + ))); + } + + // Only exit builder if it has no pending withdrawals in the queue + let pending_balance = state.get_pending_balance_to_withdraw_for_builder(builder_index)?; + if pending_balance != 0 { + return Err(BlockOperationError::invalid( + ExitInvalid::PendingWithdrawalInQueue(signed_exit.message.validator_index), + )); + } + + // Verify signature (using EIP-7044 domain: capella_fork_version for Deneb+) + if verify_signatures.is_true() { + let pubkey = builder.pubkey; + let domain = spec.compute_domain( + Domain::VoluntaryExit, + spec.capella_fork_version, + state.genesis_validators_root(), + ); + let message = signed_exit.message.signing_root(domain); + // TODO(gloas): use builder pubkey cache once available + let bls_pubkey = pubkey + .decompress() + .map_err(|_| BlockOperationError::invalid(ExitInvalid::BadSignature))?; + if !signed_exit.signature.verify(&bls_pubkey, message) { + return Err(BlockOperationError::invalid(ExitInvalid::BadSignature)); + } + } + + // Initiate builder exit + initiate_builder_exit(state, builder_index, spec)?; + + Ok(()) +} + +/// Initiate the exit of a builder. [New in Gloas:EIP7732] +fn initiate_builder_exit( + state: &mut BeaconState, + builder_index: u64, + spec: &ChainSpec, +) -> Result<(), BeaconStateError> { + let current_epoch = state.current_epoch(); + let builder = state + .builders_mut()? 
+ .get_mut(builder_index as usize) + .ok_or(BeaconStateError::UnknownBuilder(builder_index))?; + + // Return if builder already initiated exit + if builder.withdrawable_epoch != spec.far_future_epoch { + return Ok(()); + } + + // Set builder exit epoch + builder.withdrawable_epoch = current_epoch.safe_add(spec.min_builder_withdrawability_delay)?; + + Ok(()) +} + /// Validates each `bls_to_execution_change` and updates the state /// /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns @@ -814,6 +917,30 @@ pub fn process_deposit_requests_post_gloas( Ok(()) } +/// Check if there is a pending deposit for a new validator with the given pubkey. +// TODO(gloas): cache the deposit signature validation or remove this loop entirely if possible, +// it is `O(n * m)` where `n` is max 8192 and `m` is max 128M. +fn is_pending_validator( + state: &BeaconState, + pubkey: &PublicKeyBytes, + spec: &ChainSpec, +) -> Result { + for deposit in state.pending_deposits()?.iter() { + if deposit.pubkey == *pubkey { + let deposit_data = DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: deposit.signature.clone(), + }; + if is_valid_deposit_signature(&deposit_data, spec).is_ok() { + return Ok(true); + } + } + } + Ok(false) +} + pub fn process_deposit_request_post_gloas( state: &mut BeaconState, deposit_request: &DepositRequest, @@ -835,10 +962,14 @@ pub fn process_deposit_request_post_gloas( let validator_index = state.get_validator_index(&deposit_request.pubkey)?; let is_validator = validator_index.is_some(); - let is_builder_prefix = + let has_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials, spec); - if is_builder || (is_builder_prefix && !is_validator) { + if is_builder + || (has_builder_prefix + && !is_validator + && !is_pending_validator(state, &deposit_request.pubkey, spec)?) 
+ { // Apply builder deposits immediately apply_deposit_for_builder( state, diff --git a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs index c5ec80b92a..3e4f7e8189 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs @@ -2,7 +2,7 @@ use crate::common::attesting_indices_base::get_attesting_indices; use safe_arith::SafeArith; use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, PendingAttestation}; -#[cfg(feature = "arbitrary-fuzz")] +#[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; /// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self` @@ -16,7 +16,7 @@ macro_rules! set_self_if_other_is_true { } /// The information required to reward a block producer for including an attestation in a block. -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Clone, Copy, PartialEq)] pub struct InclusionInfo { /// The distance between the attestation slot and the slot that attestation was included in a @@ -48,7 +48,7 @@ impl InclusionInfo { } /// Information required to reward some validator during the current and previous epoch. -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Default, Clone, PartialEq)] pub struct ValidatorStatus { /// True if the validator has been slashed, ever. @@ -118,7 +118,7 @@ impl ValidatorStatus { /// epochs. #[derive(Clone, Debug, PartialEq)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] pub struct TotalBalances { /// The effective balance increment from the spec. 
effective_balance_increment: u64, @@ -175,7 +175,7 @@ impl TotalBalances { /// Summarised information about validator participation in the _previous and _current_ epochs of /// some `BeaconState`. -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Clone)] pub struct ValidatorStatuses { /// Information about each individual validator from the state's validator registry. diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index a818e08775..3c043a65f2 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -4,8 +4,8 @@ use milhouse::List; use std::sync::Arc; use types::{ BeaconStateError, Epoch, EthSpec, ParticipationFlags, ProgressiveBalancesCache, SyncCommittee, - Validator, consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, + state::Validators, }; /// Provides a summary of validator participation during the epoch. @@ -26,7 +26,7 @@ pub enum EpochProcessingSummary { #[derive(PartialEq, Debug)] pub struct ParticipationEpochSummary { /// Copy of the validator registry prior to mutation. - validators: List, + validators: Validators, /// Copy of the participation flags for the previous epoch. previous_epoch_participation: List, /// Copy of the participation flags for the current epoch. 
@@ -37,7 +37,7 @@ pub struct ParticipationEpochSummary { impl ParticipationEpochSummary { pub fn new( - validators: List, + validators: Validators, previous_epoch_participation: List, current_epoch_participation: List, previous_epoch: Epoch, diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs index b1d8770d4f..1114562155 100644 --- a/consensus/state_processing/src/state_advance.rs +++ b/consensus/state_processing/src/state_advance.rs @@ -80,7 +80,7 @@ pub fn partial_state_advance( } else if state.slot() == state.latest_block_header().slot && !state.latest_block_header().state_root.is_zero() { - // Post-Gloas Full block case. + // Post-Gloas Full state case. state.latest_block_header().state_root } else { state_root_opt.ok_or(Error::StateRootNotProvided)? diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index a13786f9f6..1e9c3d5fe3 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -7,7 +7,7 @@ use crate::per_block_processing::{ verify_attester_slashing, verify_bls_to_execution_change, verify_exit, verify_proposer_slashing, }; -#[cfg(feature = "arbitrary-fuzz")] +#[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use educe::Educe; use smallvec::{SmallVec, smallvec}; @@ -41,14 +41,14 @@ pub trait TransformPersist { /// The inner `op` field is private, meaning instances of this type can only be constructed /// by calling `validate`. 
#[derive(Educe, Debug, Clone)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[educe( PartialEq, Eq, Hash(bound(T: TransformPersist + std::hash::Hash, E: EthSpec)) )] #[cfg_attr( - feature = "arbitrary-fuzz", + feature = "arbitrary", arbitrary(bound = "T: TransformPersist + Arbitrary<'arbitrary>, E: EthSpec") )] pub struct SigVerifiedOp { @@ -139,7 +139,7 @@ struct SigVerifiedOpDecode { /// We need to store multiple `ForkVersion`s because attester slashings contain two indexed /// attestations which may be signed using different versions. #[derive(Debug, PartialEq, Eq, Clone, Hash, Encode, Decode, TestRandom)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] pub struct VerifiedAgainst { fork_versions: SmallVec<[ForkVersion; MAX_FORKS_VERIFIED_AGAINST]>, } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index e7e382714b..c09e3d6931 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "types" version = "0.2.1" -authors = [ - "Paul Hauner ", - "Age Manning ", -] +authors = ["Paul Hauner ", "Age Manning "] edition = { workspace = true } [features] @@ -22,7 +19,6 @@ arbitrary = [ "ssz_types/arbitrary", "swap_or_not_shuffle/arbitrary", ] -arbitrary-fuzz = ["arbitrary"] portable = ["bls/supranational-portable"] [dependencies] diff --git a/consensus/types/src/block/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs index b7b1d9d2a2..dd6f52426a 100644 --- a/consensus/types/src/block/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -377,7 +377,17 @@ impl> SignedBeaconBlock .map(|bid| bid.message.block_hash) } - /// Check if the `parent_hash` in this block's `signed_payload_bid` matches `block_hash`. + /// Convenience accessor for the block's bid's `parent_block_hash`. 
+ /// + /// This method returns an error prior to Gloas. + pub fn payload_bid_parent_block_hash(&self) -> Result { + self.message() + .body() + .signed_execution_payload_bid() + .map(|bid| bid.message.parent_block_hash) + } + + /// Check if the `parent_hash` in this block's `signed_payload_bid` matches `parent_block_hash`. /// /// This function is useful post-Gloas for determining if the parent block is full, *without* /// necessarily needing access to a beacon state. The passed in `parent_block_hash` MUST be the diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index 2f3b5da956..458622d7e6 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1616,7 +1616,7 @@ impl ChainSpec { * Fulu hard fork params */ fulu_fork_version: [0x06, 0x00, 0x00, 0x64], - fulu_fork_epoch: None, + fulu_fork_epoch: Some(Epoch::new(1714688)), custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, @@ -1686,8 +1686,7 @@ impl ChainSpec { * Networking Fulu specific */ blob_schedule: BlobSchedule::default(), - min_epochs_for_data_column_sidecars_requests: - default_min_epochs_for_data_column_sidecars_requests(), + min_epochs_for_data_column_sidecars_requests: 16384, max_data_columns_by_root_request: default_data_columns_by_root_request(), max_payload_envelopes_by_root_request: default_max_payload_envelopes_by_root_request(), diff --git a/consensus/types/src/core/consts.rs b/consensus/types/src/core/consts.rs index 0e131f26ff..728e532ed1 100644 --- a/consensus/types/src/core/consts.rs +++ b/consensus/types/src/core/consts.rs @@ -33,9 +33,9 @@ pub mod gloas { // Fork choice constants pub type PayloadStatus = u8; - pub const PAYLOAD_STATUS_PENDING: PayloadStatus = 0; - pub const PAYLOAD_STATUS_EMPTY: PayloadStatus = 1; - pub const PAYLOAD_STATUS_FULL: PayloadStatus = 2; + pub const PAYLOAD_STATUS_EMPTY: PayloadStatus = 0; + pub const PAYLOAD_STATUS_FULL: PayloadStatus = 
1; + pub const PAYLOAD_STATUS_PENDING: PayloadStatus = 2; pub const ATTESTATION_TIMELINESS_INDEX: usize = 0; pub const PTC_TIMELINESS_INDEX: usize = 1; diff --git a/consensus/types/src/data/blob_sidecar.rs b/consensus/types/src/data/blob_sidecar.rs index 638491d6d7..2774176190 100644 --- a/consensus/types/src/data/blob_sidecar.rs +++ b/consensus/types/src/data/blob_sidecar.rs @@ -3,7 +3,7 @@ use std::{fmt::Debug, hash::Hash, sync::Arc}; use bls::Signature; use context_deserialize::context_deserialize; use educe::Educe; -use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Blob as KzgBlob, Kzg, KzgCommitment, KzgProof}; +use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Kzg, KzgCommitment, KzgProof}; use merkle_proof::{MerkleTreeError, merkle_root_from_branch, verify_merkle_proof}; use rand::Rng; use safe_arith::ArithError; @@ -253,14 +253,17 @@ impl BlobSidecar { let blob = Blob::::new(blob_bytes) .map_err(|e| format!("error constructing random blob: {:?}", e))?; - let kzg_blob = KzgBlob::from_bytes(&blob).unwrap(); + let kzg_blob: &[u8; BYTES_PER_BLOB] = blob + .as_ref() + .try_into() + .map_err(|e| format!("error converting blob to kzg blob ref: {:?}", e))?; let commitment = kzg - .blob_to_kzg_commitment(&kzg_blob) + .blob_to_kzg_commitment(kzg_blob) .map_err(|e| format!("error computing kzg commitment: {:?}", e))?; let proof = kzg - .compute_blob_kzg_proof(&kzg_blob, commitment) + .compute_blob_kzg_proof(kzg_blob, commitment) .map_err(|e| format!("error computing kzg proof: {:?}", e))?; Ok(Self { diff --git a/consensus/types/src/execution/execution_payload_envelope.rs b/consensus/types/src/execution/execution_payload_envelope.rs index 64afaa8655..e54d114fb4 100644 --- a/consensus/types/src/execution/execution_payload_envelope.rs +++ b/consensus/types/src/execution/execution_payload_envelope.rs @@ -3,7 +3,9 @@ use crate::test_utils::TestRandom; use crate::{EthSpec, ForkName, Hash256, SignedRoot, Slot}; use context_deserialize::context_deserialize; use 
educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; +use ssz::{BYTES_PER_LENGTH_OFFSET, Encode as SszEncode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -24,6 +26,44 @@ pub struct ExecutionPayloadEnvelope { pub state_root: Hash256, } +impl ExecutionPayloadEnvelope { + /// Returns an empty envelope with all fields zeroed. Used for SSZ size calculations. + pub fn empty() -> Self { + Self { + payload: ExecutionPayloadGloas::default(), + execution_requests: ExecutionRequests::default(), + builder_index: 0, + beacon_block_root: Hash256::zero(), + slot: Slot::new(0), + state_root: Hash256::zero(), + } + } + + /// Returns the minimum SSZ-encoded size (all variable-length fields empty). + pub fn min_size() -> usize { + Self::empty().as_ssz_bytes().len() + } + + /// Returns the maximum SSZ-encoded size. + #[allow(clippy::arithmetic_side_effects)] + pub fn max_size() -> usize { + Self::min_size() + // ExecutionPayloadGloas variable-length fields: + + (E::max_extra_data_bytes() * ::ssz_fixed_len()) + + (E::max_transactions_per_payload() + * (BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) + + (E::max_withdrawals_per_payload() + * ::ssz_fixed_len()) + // ExecutionRequests variable-length fields: + + (E::max_deposit_requests_per_payload() + * ::ssz_fixed_len()) + + (E::max_withdrawal_requests_per_payload() + * ::ssz_fixed_len()) + + (E::max_consolidation_requests_per_payload() + * ::ssz_fixed_len()) + } +} + impl SignedRoot for ExecutionPayloadEnvelope {} #[cfg(test)] diff --git a/consensus/types/src/execution/signed_execution_payload_envelope.rs b/consensus/types/src/execution/signed_execution_payload_envelope.rs index 65c657e878..76fa841680 100644 --- a/consensus/types/src/execution/signed_execution_payload_envelope.rs +++ b/consensus/types/src/execution/signed_execution_payload_envelope.rs @@ -8,7 +8,7 @@ use bls::{PublicKey, Signature}; use 
context_deserialize::context_deserialize; use educe::Educe; use serde::{Deserialize, Serialize}; -use ssz::{BYTES_PER_LENGTH_OFFSET, Encode as SszEncode}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -26,7 +26,7 @@ impl SignedExecutionPayloadEnvelope { /// Returns the minimum SSZ-encoded size (all variable-length fields empty). pub fn min_size() -> usize { Self { - message: ExecutionPayloadEnvelope::default(), + message: ExecutionPayloadEnvelope::empty(), signature: Signature::empty(), } .as_ssz_bytes() @@ -36,21 +36,9 @@ impl SignedExecutionPayloadEnvelope { /// Returns the maximum SSZ-encoded size. #[allow(clippy::arithmetic_side_effects)] pub fn max_size() -> usize { - // Start from the min size (all variable-length fields empty) - Self::min_size() - // ExecutionPayloadGloas variable-length fields: - + (E::max_extra_data_bytes() * ::ssz_fixed_len()) - + (E::max_transactions_per_payload() - * (BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) - + (E::max_withdrawals_per_payload() - * ::ssz_fixed_len()) - // ExecutionRequests variable-length fields: - + (E::max_deposit_requests_per_payload() - * ::ssz_fixed_len()) - + (E::max_withdrawal_requests_per_payload() - * ::ssz_fixed_len()) - + (E::max_consolidation_requests_per_payload() - * ::ssz_fixed_len()) + // Signature is fixed-size, so the variable-length delta is entirely from the envelope. 
+ Self::min_size() + ExecutionPayloadEnvelope::::max_size() + - ExecutionPayloadEnvelope::::min_size() } pub fn slot(&self) -> Slot { diff --git a/consensus/types/src/kzg_ext/mod.rs b/consensus/types/src/kzg_ext/mod.rs index 63533ec71f..e0ec9dd956 100644 --- a/consensus/types/src/kzg_ext/mod.rs +++ b/consensus/types/src/kzg_ext/mod.rs @@ -1,6 +1,6 @@ pub mod consts; -pub use kzg::{Blob as KzgBlob, Error as KzgError, Kzg, KzgCommitment, KzgProof}; +pub use kzg::{Error as KzgError, Kzg, KzgCommitment, KzgProof}; use ssz_types::VariableList; diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 34cfd0ca1c..f431055c5f 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -14,6 +14,7 @@ use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError, Encode, ssz_encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{BitVector, FixedVector}; +use std::collections::BTreeMap; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; @@ -71,7 +72,8 @@ const MAX_RANDOM_VALUE: u64 = (1 << 16) - 1; // Spec: https://github.com/ethereum/consensus-specs/blob/1937aff86b41b5171a9bc3972515986f1bbbf303/specs/phase0/weak-subjectivity.md?plain=1#L50-L71 const SAFETY_DECAY: u64 = 10; -pub type Validators = List::ValidatorRegistryLimit>; +pub type Validators = + List::ValidatorRegistryLimit, BTreeMap>; pub type Balances = List::ValidatorRegistryLimit>; #[derive(Debug, PartialEq, Clone)] @@ -477,7 +479,7 @@ where // Registry #[compare_fields(as_iter)] #[test_random(default)] - pub validators: List, + pub validators: Validators, #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] #[compare_fields(as_iter)] #[test_random(default)] diff --git a/consensus/types/src/state/mod.rs b/consensus/types/src/state/mod.rs index 096bb67167..a3bb1b8c9f 100644 --- a/consensus/types/src/state/mod.rs +++ 
b/consensus/types/src/state/mod.rs @@ -17,7 +17,7 @@ pub use balance::Balance; pub use beacon_state::{ BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateBellatrix, BeaconStateCapella, BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateFulu, BeaconStateGloas, - BeaconStateHash, BeaconStateRef, CACHED_EPOCHS, DEFAULT_PRE_ELECTRA_WS_PERIOD, + BeaconStateHash, BeaconStateRef, CACHED_EPOCHS, DEFAULT_PRE_ELECTRA_WS_PERIOD, Validators, }; pub use committee_cache::{ CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index 840f8cfc9c..19f39a182b 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -12,7 +12,6 @@ fake_crypto = [] [dependencies] arbitrary = { workspace = true, optional = true } -c-kzg = { workspace = true } educe = { workspace = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } @@ -28,7 +27,6 @@ tree_hash = { workspace = true } [dev-dependencies] criterion = { workspace = true } -serde_json = { workspace = true } [[bench]] name = "benchmark" diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs index 432d84654a..d5d5596211 100644 --- a/crypto/kzg/benches/benchmark.rs +++ b/crypto/kzg/benches/benchmark.rs @@ -1,6 +1,5 @@ -use c_kzg::KzgSettings; use criterion::{criterion_group, criterion_main, Criterion}; -use kzg::{trusted_setup::get_trusted_setup, TrustedSetup, NO_PRECOMPUTE}; +use kzg::trusted_setup::get_trusted_setup; use rust_eth_kzg::{DASContext, TrustedSetup as PeerDASTrustedSetup}; pub fn bench_init_context(c: &mut Criterion) { @@ -20,21 +19,6 @@ pub fn bench_init_context(c: &mut Criterion) { ) }) }); - c.bench_function("Initialize context c-kzg (4844)", |b| { - b.iter(|| { - let trusted_setup: TrustedSetup = - serde_json::from_reader(trusted_setup_bytes.as_slice()) - .map_err(|e| format!("Unable to read trusted setup file: {}", e)) - .expect("should have 
trusted setup"); - KzgSettings::load_trusted_setup( - &trusted_setup.g1_monomial(), - &trusted_setup.g1_lagrange(), - &trusted_setup.g2_monomial(), - NO_PRECOMPUTE, - ) - .unwrap() - }) - }); } criterion_group!(benches, bench_init_context); diff --git a/crypto/kzg/src/kzg_commitment.rs b/crypto/kzg/src/kzg_commitment.rs index bc5fc5f5aa..d8ef4b36cf 100644 --- a/crypto/kzg/src/kzg_commitment.rs +++ b/crypto/kzg/src/kzg_commitment.rs @@ -1,4 +1,4 @@ -use c_kzg::BYTES_PER_COMMITMENT; +use crate::{Bytes48, BYTES_PER_COMMITMENT}; use educe::Educe; use ethereum_hashing::hash_fixed; use serde::de::{Deserialize, Deserializer}; @@ -14,7 +14,7 @@ pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; #[derive(Educe, Clone, Copy, Encode, Decode)] #[educe(PartialEq, Eq, Hash)] #[ssz(struct_behaviour = "transparent")] -pub struct KzgCommitment(pub [u8; c_kzg::BYTES_PER_COMMITMENT]); +pub struct KzgCommitment(pub [u8; BYTES_PER_COMMITMENT]); impl KzgCommitment { pub fn calculate_versioned_hash(&self) -> Hash256 { @@ -24,13 +24,13 @@ impl KzgCommitment { } pub fn empty_for_testing() -> Self { - KzgCommitment([0; c_kzg::BYTES_PER_COMMITMENT]) + KzgCommitment([0; BYTES_PER_COMMITMENT]) } } -impl From for c_kzg::Bytes48 { +impl From for Bytes48 { fn from(value: KzgCommitment) -> Self { - value.0.into() + value.0 } } diff --git a/crypto/kzg/src/kzg_proof.rs b/crypto/kzg/src/kzg_proof.rs index aa9ed185a0..e0867520eb 100644 --- a/crypto/kzg/src/kzg_proof.rs +++ b/crypto/kzg/src/kzg_proof.rs @@ -1,4 +1,4 @@ -use c_kzg::BYTES_PER_PROOF; +use crate::BYTES_PER_PROOF; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use ssz_derive::{Decode, Encode}; @@ -11,12 +11,6 @@ use tree_hash::{PackedEncoding, TreeHash}; #[ssz(struct_behaviour = "transparent")] pub struct KzgProof(pub [u8; BYTES_PER_PROOF]); -impl From for c_kzg::Bytes48 { - fn from(value: KzgProof) -> Self { - value.0.into() - } -} - impl KzgProof { /// Creates a valid proof using `G1_POINT_AT_INFINITY`. 
pub fn empty() -> Self { diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 66499dad8e..6ee352b0db 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -12,11 +12,12 @@ pub use crate::{ trusted_setup::TrustedSetup, }; -pub use c_kzg::{ - Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, - BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, +pub use rust_eth_kzg::constants::{ + BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, FIELD_ELEMENTS_PER_BLOB, }; +pub const BYTES_PER_PROOF: usize = 48; + use crate::trusted_setup::load_trusted_setup; use rayon::prelude::*; pub use rust_eth_kzg::{ @@ -25,13 +26,6 @@ pub use rust_eth_kzg::{ }; use tracing::{instrument, Span}; -/// Disables the fixed-base multi-scalar multiplication optimization for computing -/// cell KZG proofs, because `rust-eth-kzg` already handles the precomputation. -/// -/// Details about `precompute` parameter can be found here: -/// -pub const NO_PRECOMPUTE: u64 = 0; - // Note: Both `NUMBER_OF_COLUMNS` and `CELLS_PER_EXT_BLOB` are preset values - however this // is a constant in the KZG library - be aware that overriding `NUMBER_OF_COLUMNS` will break KZG // operations. @@ -39,14 +33,15 @@ pub type CellsAndKzgProofs = ([Cell; CELLS_PER_EXT_BLOB], [KzgProof; CELLS_PER_E pub type KzgBlobRef<'a> = &'a [u8; BYTES_PER_BLOB]; +type Bytes32 = [u8; 32]; +type Bytes48 = [u8; 48]; + #[derive(Debug)] pub enum Error { /// An error from initialising the trusted setup. TrustedSetupError(String), - /// An error from the underlying kzg library. - Kzg(c_kzg::Error), - /// A prover/verifier error from the rust-eth-kzg library. - PeerDASKZG(rust_eth_kzg::Error), + /// An error from the rust-eth-kzg library. 
+ Kzg(rust_eth_kzg::Error), /// The kzg verification failed KzgVerificationFailed, /// Misc indexing error @@ -57,38 +52,29 @@ pub enum Error { DASContextUninitialized, } -impl From for Error { - fn from(value: c_kzg::Error) -> Self { +impl From for Error { + fn from(value: rust_eth_kzg::Error) -> Self { Error::Kzg(value) } } -/// A wrapper over a kzg library that holds the trusted setup parameters. +/// A wrapper over the rust-eth-kzg library that holds the trusted setup parameters. #[derive(Debug)] pub struct Kzg { - trusted_setup: KzgSettings, context: DASContext, } impl Kzg { pub fn new_from_trusted_setup_no_precomp(trusted_setup: &[u8]) -> Result { - let (ckzg_trusted_setup, rkzg_trusted_setup) = load_trusted_setup(trusted_setup)?; + let rkzg_trusted_setup = load_trusted_setup(trusted_setup)?; let context = DASContext::new(&rkzg_trusted_setup, rust_eth_kzg::UsePrecomp::No); - Ok(Self { - trusted_setup: KzgSettings::load_trusted_setup( - &ckzg_trusted_setup.g1_monomial(), - &ckzg_trusted_setup.g1_lagrange(), - &ckzg_trusted_setup.g2_monomial(), - NO_PRECOMPUTE, - )?, - context, - }) + Ok(Self { context }) } /// Load the kzg trusted setup parameters from a vec of G1 and G2 points. pub fn new_from_trusted_setup(trusted_setup: &[u8]) -> Result { - let (ckzg_trusted_setup, rkzg_trusted_setup) = load_trusted_setup(trusted_setup)?; + let rkzg_trusted_setup = load_trusted_setup(trusted_setup)?; // It's not recommended to change the config parameter for precomputation as storage // grows exponentially, but the speedup is exponential - after a while the speedup @@ -100,15 +86,7 @@ impl Kzg { }, ); - Ok(Self { - trusted_setup: KzgSettings::load_trusted_setup( - &ckzg_trusted_setup.g1_monomial(), - &ckzg_trusted_setup.g1_lagrange(), - &ckzg_trusted_setup.g2_monomial(), - NO_PRECOMPUTE, - )?, - context, - }) + Ok(Self { context }) } fn context(&self) -> &DASContext { @@ -118,34 +96,35 @@ impl Kzg { /// Compute the kzg proof given a blob and its kzg commitment. 
pub fn compute_blob_kzg_proof( &self, - blob: &Blob, + blob: KzgBlobRef<'_>, kzg_commitment: KzgCommitment, ) -> Result { - self.trusted_setup - .compute_blob_kzg_proof(blob, &kzg_commitment.into()) - .map(|proof| KzgProof(proof.to_bytes().into_inner())) - .map_err(Into::into) + let proof = self + .context() + .compute_blob_kzg_proof(blob, &kzg_commitment.0) + .map_err(Error::Kzg)?; + Ok(KzgProof(proof)) } /// Verify a kzg proof given the blob, kzg commitment and kzg proof. pub fn verify_blob_kzg_proof( &self, - blob: &Blob, + blob: KzgBlobRef<'_>, kzg_commitment: KzgCommitment, kzg_proof: KzgProof, ) -> Result<(), Error> { if cfg!(feature = "fake_crypto") { return Ok(()); } - if !self.trusted_setup.verify_blob_kzg_proof( - blob, - &kzg_commitment.into(), - &kzg_proof.into(), - )? { - Err(Error::KzgVerificationFailed) - } else { - Ok(()) - } + self.context() + .verify_blob_kzg_proof(blob, &kzg_commitment.0, &kzg_proof.0) + .map_err(|e| { + if e.is_proof_invalid() { + Error::KzgVerificationFailed + } else { + Error::Kzg(e) + } + }) } /// Verify a batch of blob commitment proof triplets. @@ -154,52 +133,48 @@ impl Kzg { /// TODO(pawan): test performance against a parallelized rayon impl. pub fn verify_blob_kzg_proof_batch( &self, - blobs: &[Blob], + blobs: &[KzgBlobRef<'_>], kzg_commitments: &[KzgCommitment], kzg_proofs: &[KzgProof], ) -> Result<(), Error> { if cfg!(feature = "fake_crypto") { return Ok(()); } - let commitments_bytes = kzg_commitments - .iter() - .map(|comm| Bytes48::from(*comm)) - .collect::>(); + let blob_refs: Vec<&[u8; BYTES_PER_BLOB]> = blobs.to_vec(); + let commitment_refs: Vec<&[u8; 48]> = kzg_commitments.iter().map(|c| &c.0).collect(); + let proof_refs: Vec<&[u8; 48]> = kzg_proofs.iter().map(|p| &p.0).collect(); - let proofs_bytes = kzg_proofs - .iter() - .map(|proof| Bytes48::from(*proof)) - .collect::>(); - - if !self.trusted_setup.verify_blob_kzg_proof_batch( - blobs, - &commitments_bytes, - &proofs_bytes, - )? 
{ - Err(Error::KzgVerificationFailed) - } else { - Ok(()) - } + self.context() + .verify_blob_kzg_proof_batch(blob_refs, commitment_refs, proof_refs) + .map_err(|e| { + if e.is_proof_invalid() { + Error::KzgVerificationFailed + } else { + Error::Kzg(e) + } + }) } /// Converts a blob to a kzg commitment. - pub fn blob_to_kzg_commitment(&self, blob: &Blob) -> Result { - self.trusted_setup + pub fn blob_to_kzg_commitment(&self, blob: KzgBlobRef<'_>) -> Result { + let commitment = self + .context() .blob_to_kzg_commitment(blob) - .map(|commitment| KzgCommitment(commitment.to_bytes().into_inner())) - .map_err(Into::into) + .map_err(Error::Kzg)?; + Ok(KzgCommitment(commitment)) } /// Computes the kzg proof for a given `blob` and an evaluation point `z` pub fn compute_kzg_proof( &self, - blob: &Blob, + blob: KzgBlobRef<'_>, z: &Bytes32, ) -> Result<(KzgProof, Bytes32), Error> { - self.trusted_setup - .compute_kzg_proof(blob, z) - .map(|(proof, y)| (KzgProof(proof.to_bytes().into_inner()), y)) - .map_err(Into::into) + let (proof, y) = self + .context() + .compute_kzg_proof(blob, *z) + .map_err(Error::Kzg)?; + Ok((KzgProof(proof), y)) } /// Verifies a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` @@ -213,9 +188,14 @@ impl Kzg { if cfg!(feature = "fake_crypto") { return Ok(true); } - self.trusted_setup - .verify_kzg_proof(&kzg_commitment.into(), z, y, &kzg_proof.into()) - .map_err(Into::into) + match self + .context() + .verify_kzg_proof(&kzg_commitment.0, *z, *y, &kzg_proof.0) + { + Ok(()) => Ok(true), + Err(e) if e.is_proof_invalid() => Ok(false), + Err(e) => Err(Error::Kzg(e)), + } } /// Computes the cells and associated proofs for a given `blob`. 
@@ -226,18 +206,15 @@ impl Kzg { let (cells, proofs) = self .context() .compute_cells_and_kzg_proofs(blob) - .map_err(Error::PeerDASKZG)?; + .map_err(Error::Kzg)?; - // Convert the proof type to a c-kzg proof type - let c_kzg_proof = proofs.map(KzgProof); - Ok((cells, c_kzg_proof)) + let kzg_proofs = proofs.map(KzgProof); + Ok((cells, kzg_proofs)) } /// Computes the cells for a given `blob`. pub fn compute_cells(&self, blob: KzgBlobRef<'_>) -> Result<[Cell; CELLS_PER_EXT_BLOB], Error> { - self.context() - .compute_cells(blob) - .map_err(Error::PeerDASKZG) + self.context().compute_cells(blob).map_err(Error::Kzg) } /// Verifies a batch of cell-proof-commitment triplets. @@ -291,8 +268,8 @@ impl Kzg { for (cell, proof, commitment) in &column_data { cells.push(*cell); - proofs.push(proof.as_ref()); - commitments.push(commitment.as_ref()); + proofs.push(proof); + commitments.push(commitment); } // Create per-chunk tracing span for visualizing parallel processing. @@ -319,7 +296,7 @@ impl Kzg { Err(e) if e.is_proof_invalid() => { Err((Some(column_index), Error::KzgVerificationFailed)) } - Err(e) => Err((Some(column_index), Error::PeerDASKZG(e))), + Err(e) => Err((Some(column_index), Error::Kzg(e))), } }) .collect::, (Option, Error)>>()?; @@ -335,10 +312,9 @@ impl Kzg { let (cells, proofs) = self .context() .recover_cells_and_kzg_proofs(cell_ids.to_vec(), cells.to_vec()) - .map_err(Error::PeerDASKZG)?; + .map_err(Error::Kzg)?; - // Convert the proof type to a c-kzg proof type - let c_kzg_proof = proofs.map(KzgProof); - Ok((cells, c_kzg_proof)) + let kzg_proofs = proofs.map(KzgProof); + Ok((cells, kzg_proofs)) } } diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs index 75884b8199..5c285b50f2 100644 --- a/crypto/kzg/src/trusted_setup.rs +++ b/crypto/kzg/src/trusted_setup.rs @@ -24,7 +24,7 @@ struct G1Point([u8; BYTES_PER_G1_POINT]); struct G2Point([u8; BYTES_PER_G2_POINT]); /// Contains the trusted setup parameters that are required to 
instantiate a -/// `c_kzg::KzgSettings` object. +/// `rust_eth_kzg::TrustedSetup` object. /// /// The serialize/deserialize implementations are written according to /// the format specified in the ethereum consensus specs trusted setup files. @@ -155,19 +155,9 @@ fn strip_prefix(s: &str) -> &str { } } -/// Loads the trusted setup from JSON. -/// -/// ## Note: -/// Currently we load both c-kzg and rust-eth-kzg trusted setup structs, because c-kzg is still being -/// used for 4844. Longer term we're planning to switch all KZG operations to the rust-eth-kzg -/// crate, and we'll be able to maintain a single trusted setup struct. -pub(crate) fn load_trusted_setup( - trusted_setup: &[u8], -) -> Result<(TrustedSetup, PeerDASTrustedSetup), Error> { - let ckzg_trusted_setup: TrustedSetup = serde_json::from_slice(trusted_setup) - .map_err(|e| Error::TrustedSetupError(format!("{e:?}")))?; +/// Loads the trusted setup from JSON bytes into a `rust_eth_kzg::TrustedSetup`. +pub(crate) fn load_trusted_setup(trusted_setup: &[u8]) -> Result { let trusted_setup_json = std::str::from_utf8(trusted_setup) .map_err(|e| Error::TrustedSetupError(format!("{e:?}")))?; - let rkzg_trusted_setup = PeerDASTrustedSetup::from_json(trusted_setup_json); - Ok((ckzg_trusted_setup, rkzg_trusted_setup)) + Ok(PeerDASTrustedSetup::from_json(trusted_setup_json)) } diff --git a/deny.toml b/deny.toml index 3b230155f7..cf0cd7d3cd 100644 --- a/deny.toml +++ b/deny.toml @@ -11,6 +11,7 @@ deny = [ { crate = "derivative", reason = "use educe or derive_more instead" }, { crate = "ark-ff", reason = "present in Cargo.lock but not needed by Lighthouse" }, { crate = "openssl", reason = "non-Rust dependency, use rustls instead" }, + { crate = "c-kzg", reason = "non-Rust dependency, use rust_eth_kzg instead" }, { crate = "strum", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "reqwest", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "aes", 
deny-multiple-versions = true, reason = "takes a long time to compile" }, diff --git a/slasher/service/src/lib.rs b/slasher/service/src/lib.rs index ac15b49ee9..69ec59aa2c 100644 --- a/slasher/service/src/lib.rs +++ b/slasher/service/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::result_large_err)] mod service; pub use service::SlasherService; diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index fd8a3f6da0..48378a4c95 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.2 +CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.3 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 782b554ff1..dd6be14306 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -47,6 +47,8 @@ excluded_paths = [ "bls12-381-tests/hash_to_G2", "tests/.*/eip7732", "tests/.*/eip7805", + # Heze fork is not implemented + "tests/.*/heze/.*", # TODO(gloas): remove these ignores as Gloas consensus is implemented "tests/.*/gloas/fork_choice/.*", # Ignore MatrixEntry SSZ tests for now. diff --git a/testing/ef_tests/download_test_vectors.sh b/testing/ef_tests/download_test_vectors.sh index ff5b61bb47..f91b2d1c38 100755 --- a/testing/ef_tests/download_test_vectors.sh +++ b/testing/ef_tests/download_test_vectors.sh @@ -10,7 +10,7 @@ if [[ "$version" == "nightly" || "$version" =~ ^nightly-[0-9]+$ ]]; then exit 1 fi - for cmd in unzip jq; do + for cmd in jq; do if ! 
command -v "${cmd}" >/dev/null 2>&1; then echo "Error ${cmd} is not installed" exit 1 @@ -48,13 +48,10 @@ if [[ "$version" == "nightly" || "$version" =~ ^nightly-[0-9]+$ ]]; then echo "Downloading artifact: ${name}" curl --progress-bar --location --show-error --retry 3 --retry-all-errors --fail \ -H "${auth_header}" -H "Accept: application/vnd.github+json" \ - --output "${name}.zip" "${url}" || { + --output "${name}" "${url}" || { echo "Failed to download ${name}" exit 1 } - - unzip -qo "${name}.zip" - rm -f "${name}.zip" done else for test in "${TESTS[@]}"; do diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index ca77dc8d79..07a7d4c6b6 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -3,7 +3,7 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use ::fork_choice::{PayloadVerificationStatus, ProposerHeadError}; use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head; use beacon_chain::blob_verification::GossipBlobError; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::chain_config::{ DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_PARENT_THRESHOLD, DisallowedReOrgOffsets, @@ -561,21 +561,13 @@ impl Tester { let block = Arc::new(block); let result: Result, _> = self - .block_on_dangerous( - self.harness.chain.process_block( - block_root, - RpcBlock::new( - block.clone(), - None, - &self.harness.chain.data_availability_checker, - self.harness.chain.spec.clone(), - ) - .map_err(|e| Error::InternalError(format!("{:?}", e)))?, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ), - )? + .block_on_dangerous(self.harness.chain.process_block( + block_root, + LookupBlock::new(block.clone()), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ))? 
.map(|avail: AvailabilityProcessingStatus| avail.try_into()); let success = data_column_success && result.as_ref().is_ok_and(|inner| inner.is_ok()); if success != valid { @@ -659,21 +651,13 @@ impl Tester { let block = Arc::new(block); let result: Result, _> = self - .block_on_dangerous( - self.harness.chain.process_block( - block_root, - RpcBlock::new( - block.clone(), - None, - &self.harness.chain.data_availability_checker, - self.harness.chain.spec.clone(), - ) - .map_err(|e| Error::InternalError(format!("{:?}", e)))?, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ), - )? + .block_on_dangerous(self.harness.chain.process_block( + block_root, + LookupBlock::new(block.clone()), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ))? .map(|avail: AvailabilityProcessingStatus| avail.try_into()); let success = blob_success && result.as_ref().is_ok_and(|inner| inner.is_ok()); if success != valid { diff --git a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs index 7973af861f..200f439c28 100644 --- a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use kzg::{Bytes48, Error as KzgError}; +use kzg::Error as KzgError; use serde::Deserialize; use std::marker::PhantomData; @@ -47,8 +47,8 @@ impl Case for KZGVerifyCellKZGProofBatch { let result = parse_input(&self.input).and_then(|(cells, proofs, cell_indices, commitments)| { - let proofs: Vec = proofs.iter().map(|&proof| proof.into()).collect(); - let commitments: Vec = commitments.iter().map(|&c| c.into()).collect(); + let proofs = proofs.iter().map(|&proof| proof.0).collect::>(); + let commitments = commitments.iter().map(|&c| c.0).collect::>(); let cells = cells.iter().map(|c| c.as_ref()).collect::>(); let kzg = get_kzg(); match 
kzg.verify_cell_proof_batch(&cells, &proofs, cell_indices, &commitments) { diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index ca0124e1aa..798c66b666 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -716,8 +716,13 @@ impl> LoadCase for Operations { // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. - let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { - match O::decode(&path.join(O::filename()), fork_name, spec) { + let operation_path = path.join(O::filename()); + let (operation, bls_error) = if !operation_path.is_file() { + // Some test cases (e.g. builder_voluntary_exit__success) have no operation file. + // TODO(gloas): remove this once the test vectors are fixed + (None, None) + } else if metadata.bls_setting.unwrap_or_default().check().is_ok() { + match O::decode(&operation_path, fork_name, spec) { Ok(op) => (Some(op), None), Err(Error::InvalidBLSInput(error)) => (None, Some(error)), Err(e) => return Err(e), diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index da3c5533b6..f8c16aec0b 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -537,11 +537,6 @@ impl Handler for RandomHandler { fn handler_name(&self) -> String { "random".into() } - - fn disabled_forks(&self) -> Vec { - // TODO(gloas): remove once we have Gloas random tests - vec![ForkName::Gloas] - } } #[derive(Educe)] diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index a9d0a0756b..79581ee529 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -363,7 +363,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { network_1.add_beacon_node_with_delay( beacon_config.clone(), mock_execution_config.clone(), - END_EPOCH - 1, + END_EPOCH - 3, slot_duration, 
slots_per_epoch ), diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 35200692c3..de202e5812 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -463,6 +463,9 @@ pub async fn reconnect_to_execution_layer( } /// Ensure all validators have attested correctly. +/// +/// Checks attestation rewards for head, target, and source. +/// A positive reward indicates a correct vote. pub async fn check_attestation_correctness( network: LocalNetwork, start_epoch: u64, @@ -476,54 +479,49 @@ pub async fn check_attestation_correctness( let remote_node = &network.remote_nodes()?[node_index]; - let results = remote_node - .get_lighthouse_analysis_attestation_performance( - Epoch::new(start_epoch), - Epoch::new(upto_epoch - 2), - "global".to_string(), - ) - .await - .map_err(|e| format!("Unable to get attestation performance: {e}"))?; - - let mut active_successes: f64 = 0.0; let mut head_successes: f64 = 0.0; let mut target_successes: f64 = 0.0; let mut source_successes: f64 = 0.0; - let mut total: f64 = 0.0; - for result in results { - for epochs in result.epochs.values() { + let end_epoch = upto_epoch + .checked_sub(2) + .ok_or_else(|| "upto_epoch must be >= 2 to have attestation rewards".to_string())?; + for epoch in start_epoch..=end_epoch { + let response = remote_node + .post_beacon_rewards_attestations(Epoch::new(epoch), &[]) + .await + .map_err(|e| format!("Unable to get attestation rewards for epoch {epoch}: {e}"))?; + + for reward in &response.data.total_rewards { total += 1.0; - if epochs.active { - active_successes += 1.0; - } - if epochs.head { + // A positive reward means the validator made a correct vote. 
+ if reward.head > 0 { head_successes += 1.0; } - if epochs.target { + if reward.target > 0 { target_successes += 1.0; } - if epochs.source { + if reward.source > 0 { source_successes += 1.0; } } } - let active_percent = active_successes / total * 100.0; + + if total == 0.0 { + return Err("No attestation rewards data found".to_string()); + } + let head_percent = head_successes / total * 100.0; let target_percent = target_successes / total * 100.0; let source_percent = source_successes / total * 100.0; eprintln!("Total Attestations: {}", total); - eprintln!("Active: {}: {}%", active_successes, active_percent); eprintln!("Head: {}: {}%", head_successes, head_percent); eprintln!("Target: {}: {}%", target_successes, target_percent); eprintln!("Source: {}: {}%", source_successes, source_percent); - if active_percent < acceptable_attestation_performance { - return Err("Active percent was below required level".to_string()); - } if head_percent < acceptable_attestation_performance { return Err("Head percent was below required level".to_string()); } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 4b9432b67b..1f36f8d4ce 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -25,6 +25,7 @@ mod tests { use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; use fixed_bytes::FixedBytesExtended; + use futures::StreamExt; use initialized_validators::{ InitializedValidators, load_pem_certificate, load_pkcs12_identity, }; @@ -50,7 +51,7 @@ mod tests { use types::{attestation::AttestationBase, *}; use url::Url; use validator_store::{ - Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore, + AttestationToSign, Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore, }; /// If the we are unable to reach the Web3Signer HTTP API within this time out then we will @@ -654,13 +655,14 @@ mod tests { .await .assert_signatures_match("attestation", 
|pubkey, validator_store| async move { let attestation = get_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await - .unwrap() - .pop() - .unwrap() - .1 + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + stream.next().await.unwrap().unwrap().pop().unwrap().1 }) .await .assert_signatures_match("signed_aggregate", |pubkey, validator_store| async move { @@ -879,22 +881,28 @@ mod tests { .await .assert_signatures_match("first_attestation", |pubkey, validator_store| async move { let attestation = first_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await - .unwrap() - .pop() - .unwrap() - .1 + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + stream.next().await.unwrap().unwrap().pop().unwrap().1 }) .await .assert_slashable_attestation_should_sign( "double_vote_attestation", move |pubkey, validator_store| async move { let attestation = double_vote_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + stream.next().await.unwrap() }, slashable_message_should_sign, ) @@ -903,9 +911,14 @@ mod tests { "surrounding_attestation", move |pubkey, validator_store| async move { let attestation = surrounding_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + 
stream.next().await.unwrap() }, slashable_message_should_sign, ) @@ -914,9 +927,14 @@ mod tests { "surrounded_attestation", move |pubkey, validator_store| async move { let attestation = surrounded_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + stream.next().await.unwrap() }, slashable_message_should_sign, ) diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index 601b2f1666..eb35075526 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -9,6 +9,7 @@ use eth2::lighthouse_vc::{ types::Web3SignerValidatorRequest, }; use fixed_bytes::FixedBytesExtended; +use futures::StreamExt; use itertools::Itertools; use lighthouse_validator_store::DEFAULT_GAS_LIMIT; use rand::rngs::StdRng; @@ -19,6 +20,7 @@ use std::{collections::HashMap, path::Path}; use tokio::runtime::Handle; use typenum::Unsigned; use types::{Address, attestation::AttestationBase}; +use validator_store::AttestationToSign; use validator_store::ValidatorStore; use zeroize::Zeroizing; @@ -1101,11 +1103,16 @@ async fn generic_migration_test( // Sign attestations on VC1. 
for (validator_index, attestation) in first_vc_attestations { let public_key = keystore_pubkey(&keystores[validator_index]); - let safe_attestations = tester1 + let stream = tester1 .validator_store - .sign_attestations(vec![(0, public_key, 0, attestation.clone())]) - .await - .unwrap(); + .sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey: public_key, + validator_committee_index: 0, + attestation: attestation.clone(), + }]); + tokio::pin!(stream); + let safe_attestations = stream.next().await.unwrap().unwrap(); assert_eq!(safe_attestations.len(), 1); // Compare data only, ignoring signatures which are added during signing. assert_eq!(safe_attestations[0].1.data(), attestation.data()); @@ -1184,10 +1191,16 @@ async fn generic_migration_test( // Sign attestations on the second VC. for (validator_index, attestation, should_succeed) in second_vc_attestations { let public_key = keystore_pubkey(&keystores[validator_index]); - let result = tester2 + let stream = tester2 .validator_store - .sign_attestations(vec![(0, public_key, 0, attestation.clone())]) - .await; + .sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey: public_key, + validator_committee_index: 0, + attestation: attestation.clone(), + }]); + tokio::pin!(stream); + let result = stream.next().await.unwrap(); match result { Ok(safe_attestations) => { if should_succeed { @@ -1331,14 +1344,14 @@ async fn delete_concurrent_with_signing() { for j in 0..num_attestations { let att = make_attestation(j, j + 1); for (validator_index, public_key) in thread_pubkeys.iter().enumerate() { - let _ = validator_store - .sign_attestations(vec![( - validator_index as u64, - *public_key, - 0, - att.clone(), - )]) - .await; + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: validator_index as u64, + pubkey: *public_key, + validator_committee_index: 0, + attestation: att.clone(), + }]); + tokio::pin!(stream); + let _ = stream.next().await; } } 
}); diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index 7806482ffb..e8c1cfbc43 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -2,7 +2,7 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition} use bls::{PublicKeyBytes, Signature}; use doppelganger_service::DoppelgangerService; use eth2::types::PublishBlockRequest; -use futures::future::join_all; +use futures::{Stream, future::join_all, stream}; use initialized_validators::InitializedValidators; use logging::crit; use parking_lot::{Mutex, RwLock}; @@ -17,7 +17,7 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; -use tracing::{error, info, instrument, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, warn}; use types::{ AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, ExecutionPayloadEnvelope, Fork, @@ -28,7 +28,8 @@ use types::{ ValidatorRegistrationData, VoluntaryExit, graffiti::GraffitiString, }; use validator_store::{ - DoppelgangerStatus, Error as ValidatorStoreError, ProposalData, SignedBlock, UnsignedBlock, + AggregateToSign, AttestationToSign, ContributionToSign, DoppelgangerStatus, + Error as ValidatorStoreError, ProposalData, SignedBlock, SyncMessageToSign, UnsignedBlock, ValidatorStore, }; @@ -691,6 +692,119 @@ impl LighthouseValidatorStore { Ok(safe_attestations) } + + /// Signs an `AggregateAndProof` for a given validator. + /// + /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be + /// modified by actors other than the signing validator. 
+ pub async fn produce_signed_aggregate_and_proof( + &self, + validator_pubkey: PublicKeyBytes, + aggregator_index: u64, + aggregate: Attestation, + selection_proof: SelectionProof, + ) -> Result, Error> { + let signing_epoch = aggregate.data().target.epoch; + let signing_context = self.signing_context(Domain::AggregateAndProof, signing_epoch); + + let message = + AggregateAndProof::from_attestation(aggregator_index, aggregate, selection_proof); + + let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; + let signature = signing_method + .get_signature::>( + SignableMessage::SignedAggregateAndProof(message.to_ref()), + signing_context, + &self.spec, + &self.task_executor, + ) + .await?; + + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_AGGREGATES_TOTAL, + &[validator_metrics::SUCCESS], + ); + + Ok(SignedAggregateAndProof::from_aggregate_and_proof( + message, signature, + )) + } + + pub async fn produce_sync_committee_signature( + &self, + slot: Slot, + beacon_block_root: Hash256, + validator_index: u64, + validator_pubkey: &PublicKeyBytes, + ) -> Result { + let signing_epoch = slot.epoch(E::slots_per_epoch()); + let signing_context = self.signing_context(Domain::SyncCommittee, signing_epoch); + + // Bypass `with_validator_signing_method`: sync committee messages are not slashable. 
+ let signing_method = self.doppelganger_bypassed_signing_method(*validator_pubkey)?; + + let signature = signing_method + .get_signature::>( + SignableMessage::SyncCommitteeSignature { + beacon_block_root, + slot, + }, + signing_context, + &self.spec, + &self.task_executor, + ) + .await + .map_err(Error::SpecificError)?; + + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL, + &[validator_metrics::SUCCESS], + ); + + Ok(SyncCommitteeMessage { + slot, + beacon_block_root, + validator_index, + signature, + }) + } + + pub async fn produce_signed_contribution_and_proof( + &self, + aggregator_index: u64, + aggregator_pubkey: PublicKeyBytes, + contribution: SyncCommitteeContribution, + selection_proof: SyncSelectionProof, + ) -> Result, Error> { + let signing_epoch = contribution.slot.epoch(E::slots_per_epoch()); + let signing_context = self.signing_context(Domain::ContributionAndProof, signing_epoch); + + // Bypass `with_validator_signing_method`: sync committee messages are not slashable. 
+ let signing_method = self.doppelganger_bypassed_signing_method(aggregator_pubkey)?; + + let message = ContributionAndProof { + aggregator_index, + contribution, + selection_proof: selection_proof.into(), + }; + + let signature = signing_method + .get_signature::>( + SignableMessage::SignedContributionAndProof(&message), + signing_context, + &self.spec, + &self.task_executor, + ) + .await + .map_err(Error::SpecificError)?; + + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL, + &[validator_metrics::SUCCESS], + ); + + Ok(SignedContributionAndProof { message, signature }) + } } impl ValidatorStore for LighthouseValidatorStore { @@ -882,72 +996,83 @@ impl ValidatorStore for LighthouseValidatorS } } - async fn sign_attestations( + fn sign_attestations( self: &Arc, - mut attestations: Vec<(u64, PublicKeyBytes, usize, Attestation)>, - ) -> Result)>, Error> { - // Sign all attestations concurrently. - let signing_futures = - attestations - .iter_mut() - .map(|(_, pubkey, validator_committee_index, attestation)| { + mut attestations: Vec>, + ) -> impl Stream)>, Error>> + Send { + let store = self.clone(); + stream::once(async move { + // Sign all attestations concurrently. + let signing_futures = attestations.iter_mut().map( + |AttestationToSign { + pubkey, + validator_committee_index, + attestation, + .. + }| { let pubkey = *pubkey; let validator_committee_index = *validator_committee_index; + let store = store.clone(); async move { - self.sign_attestation_no_slashing_protection( - pubkey, - validator_committee_index, - attestation, - ) - .await + store + .sign_attestation_no_slashing_protection( + pubkey, + validator_committee_index, + attestation, + ) + .await } - }); + }, + ); - // Execute all signing in parallel. - let results: Vec<_> = join_all(signing_futures).await; + // Execute all signing in parallel. 
+ let results: Vec<_> = join_all(signing_futures).await; - // Collect successfully signed attestations and log errors. - let mut signed_attestations = Vec::with_capacity(attestations.len()); - for (result, (validator_index, pubkey, _, attestation)) in - results.into_iter().zip(attestations.into_iter()) - { - match result { - Ok(()) => { - signed_attestations.push((validator_index, attestation, pubkey)); - } - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - warn!( - info = "a validator may have recently been removed from this VC", - ?pubkey, - "Missing pubkey for attestation" - ); - } - Err(e) => { - crit!( - error = ?e, - "Failed to sign attestation" - ); + // Collect successfully signed attestations and log errors. + let mut signed_attestations = Vec::with_capacity(attestations.len()); + for (result, att) in results.into_iter().zip(attestations.into_iter()) { + match result { + Ok(()) => { + signed_attestations.push(( + att.validator_index, + att.attestation, + att.pubkey, + )); + } + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + warn!( + info = "a validator may have recently been removed from this VC", + ?pubkey, + "Missing pubkey for attestation" + ); + } + Err(e) => { + crit!( + error = ?e, + "Failed to sign attestation" + ); + } } } - } - if signed_attestations.is_empty() { - return Ok(vec![]); - } + if signed_attestations.is_empty() { + return Ok(vec![]); + } - // Check slashing protection and insert into database. Use a dedicated blocking thread - // to avoid clogging the async executor with blocking database I/O. - let validator_store = self.clone(); - let safe_attestations = self - .task_executor - .spawn_blocking_handle( - move || validator_store.slashing_protect_attestations(signed_attestations), - "slashing_protect_attestations", - ) - .ok_or(Error::ExecutorError)? - .await - .map_err(|_| Error::ExecutorError)??; - Ok(safe_attestations) + // Check slashing protection and insert into database. 
Use a dedicated blocking + // thread to avoid clogging the async executor with blocking database I/O. + let validator_store = store.clone(); + let safe_attestations = store + .task_executor + .spawn_blocking_handle( + move || validator_store.slashing_protect_attestations(signed_attestations), + "slashing_protect_attestations", + ) + .ok_or(Error::ExecutorError)? + .await + .map_err(|_| Error::ExecutorError)??; + Ok(safe_attestations) + }) } async fn sign_validator_registration_data( @@ -979,43 +1104,6 @@ impl ValidatorStore for LighthouseValidatorS }) } - /// Signs an `AggregateAndProof` for a given validator. - /// - /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be - /// modified by actors other than the signing validator. - async fn produce_signed_aggregate_and_proof( - &self, - validator_pubkey: PublicKeyBytes, - aggregator_index: u64, - aggregate: Attestation, - selection_proof: SelectionProof, - ) -> Result, Error> { - let signing_epoch = aggregate.data().target.epoch; - let signing_context = self.signing_context(Domain::AggregateAndProof, signing_epoch); - - let message = - AggregateAndProof::from_attestation(aggregator_index, aggregate, selection_proof); - - let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; - let signature = signing_method - .get_signature::>( - SignableMessage::SignedAggregateAndProof(message.to_ref()), - signing_context, - &self.spec, - &self.task_executor, - ) - .await?; - - validator_metrics::inc_counter_vec( - &validator_metrics::SIGNED_AGGREGATES_TOTAL, - &[validator_metrics::SUCCESS], - ); - - Ok(SignedAggregateAndProof::from_aggregate_and_proof( - message, signature, - )) - } - /// Produces a `SelectionProof` for the `slot`, signed by with corresponding secret key to /// `validator_pubkey`. 
async fn produce_selection_proof( @@ -1090,80 +1178,172 @@ impl ValidatorStore for LighthouseValidatorS Ok(signature.into()) } - async fn produce_sync_committee_signature( - &self, - slot: Slot, - beacon_block_root: Hash256, - validator_index: u64, - validator_pubkey: &PublicKeyBytes, - ) -> Result { - let signing_epoch = slot.epoch(E::slots_per_epoch()); - let signing_context = self.signing_context(Domain::SyncCommittee, signing_epoch); - - // Bypass `with_validator_signing_method`: sync committee messages are not slashable. - let signing_method = self.doppelganger_bypassed_signing_method(*validator_pubkey)?; - - let signature = signing_method - .get_signature::>( - SignableMessage::SyncCommitteeSignature { - beacon_block_root, - slot, + fn sign_aggregate_and_proofs( + self: &Arc, + aggregates: Vec>, + ) -> impl Stream>, Error>> + Send { + let store = self.clone(); + let count = aggregates.len(); + stream::once(async move { + let signing_futures = aggregates.into_iter().map( + |AggregateToSign { + pubkey, + aggregator_index, + aggregate, + selection_proof, + }| { + let store = store.clone(); + async move { + let result = store + .produce_signed_aggregate_and_proof( + pubkey, + aggregator_index, + aggregate, + selection_proof, + ) + .await; + (pubkey, result) + } }, - signing_context, - &self.spec, - &self.task_executor, - ) - .await - .map_err(Error::SpecificError)?; + ); - validator_metrics::inc_counter_vec( - &validator_metrics::SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL, - &[validator_metrics::SUCCESS], - ); + let results = join_all(signing_futures) + .instrument(info_span!("sign_aggregates", count)) + .await; - Ok(SyncCommitteeMessage { - slot, - beacon_block_root, - validator_index, - signature, + let mut signed = Vec::with_capacity(results.len()); + for (pubkey, result) in results { + match result { + Ok(agg) => signed.push(agg), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the 
API. + debug!(?pubkey, "Missing pubkey for aggregate"); + } + Err(e) => { + crit!(error = ?e, pubkey = ?pubkey, "Failed to sign aggregate"); + } + } + } + Ok(signed) }) } - async fn produce_signed_contribution_and_proof( - &self, - aggregator_index: u64, - aggregator_pubkey: PublicKeyBytes, - contribution: SyncCommitteeContribution, - selection_proof: SyncSelectionProof, - ) -> Result, Error> { - let signing_epoch = contribution.slot.epoch(E::slots_per_epoch()); - let signing_context = self.signing_context(Domain::ContributionAndProof, signing_epoch); + fn sign_sync_committee_signatures( + self: &Arc, + messages: Vec, + ) -> impl Stream, Error>> + Send { + let store = self.clone(); + let count = messages.len(); + stream::once(async move { + let signing_futures = messages.into_iter().map( + |SyncMessageToSign { + slot, + beacon_block_root, + validator_index, + pubkey, + }| { + let store = store.clone(); + async move { + let result = store + .produce_sync_committee_signature( + slot, + beacon_block_root, + validator_index, + &pubkey, + ) + .await; + (pubkey, validator_index, slot, result) + } + }, + ); - // Bypass `with_validator_signing_method`: sync committee messages are not slashable. - let signing_method = self.doppelganger_bypassed_signing_method(aggregator_pubkey)?; + let results = join_all(signing_futures) + .instrument(info_span!("sign_sync_signatures", count)) + .await; - let message = ContributionAndProof { - aggregator_index, - contribution, - selection_proof: selection_proof.into(), - }; + let mut signed = Vec::with_capacity(results.len()); + for (_pubkey, validator_index, slot, result) in results { + match result { + Ok(sig) => signed.push(sig), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. 
+ debug!( + ?pubkey, + validator_index, + %slot, + "Missing pubkey for sync committee signature" + ); + } + Err(e) => { + crit!( + validator_index, + %slot, + error = ?e, + "Failed to sign sync committee signature" + ); + } + } + } + Ok(signed) + }) + } - let signature = signing_method - .get_signature::>( - SignableMessage::SignedContributionAndProof(&message), - signing_context, - &self.spec, - &self.task_executor, - ) - .await - .map_err(Error::SpecificError)?; + fn sign_sync_committee_contributions( + self: &Arc, + contributions: Vec>, + ) -> impl Stream>, Error>> + Send { + let store = self.clone(); + let count = contributions.len(); + stream::once(async move { + let signing_futures = contributions.into_iter().map( + |ContributionToSign { + aggregator_index, + aggregator_pubkey, + contribution, + selection_proof, + }| { + let store = store.clone(); + let slot = contribution.slot; + async move { + let result = store + .produce_signed_contribution_and_proof( + aggregator_index, + aggregator_pubkey, + contribution, + selection_proof, + ) + .await; + (slot, result) + } + }, + ); - validator_metrics::inc_counter_vec( - &validator_metrics::SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL, - &[validator_metrics::SUCCESS], - ); + let results = join_all(signing_futures) + .instrument(info_span!("sign_sync_contributions", count)) + .await; - Ok(SignedContributionAndProof { message, signature }) + let mut signed = Vec::with_capacity(results.len()); + for (slot, result) in results { + match result { + Ok(contribution) => signed.push(contribution), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + debug!(?pubkey, %slot, "Missing pubkey for sync contribution"); + } + Err(e) => { + crit!( + %slot, + error = ?e, + "Unable to sign sync committee contribution" + ); + } + } + } + Ok(signed) + }) } /// Prune the slashing protection database so that it remains performant. 
diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 695a693385..8017941ca6 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } autotests = false [features] -arbitrary-fuzz = ["dep:arbitrary", "types/arbitrary-fuzz", "eip_3076/arbitrary-fuzz"] +arbitrary = ["dep:arbitrary", "types/arbitrary", "eip_3076/arbitrary"] portable = ["types/portable"] [dependencies] diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index c5c3df7ea4..996116dd1c 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -11,7 +11,7 @@ use tempfile::tempdir; use types::{Epoch, Hash256, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct MultiTestCase { pub name: String, pub genesis_validators_root: Hash256, @@ -19,7 +19,7 @@ pub struct MultiTestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestCase { pub should_succeed: bool, pub contains_slashable_data: bool, @@ -29,7 +29,7 @@ pub struct TestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestBlock { pub pubkey: PublicKeyBytes, pub slot: Slot, @@ -39,7 +39,7 @@ pub struct TestBlock { } #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", 
derive(arbitrary::Arbitrary))] pub struct TestAttestation { pub pubkey: PublicKeyBytes, pub source_epoch: Epoch, diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index a9d5283312..fe808efd88 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -1,6 +1,6 @@ use crate::duties_service::{DutiesService, DutyAndProof}; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback, beacon_head_monitor::HeadEvent}; -use futures::future::join_all; +use futures::StreamExt; use logging::crit; use slot_clock::SlotClock; use std::collections::HashMap; @@ -13,7 +13,7 @@ use tokio::time::{Duration, Instant, sleep, sleep_until}; use tracing::{Instrument, debug, error, info, info_span, instrument, warn}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Hash256, Slot}; -use validator_store::{Error as ValidatorStoreError, ValidatorStore}; +use validator_store::{AggregateToSign, AttestationToSign, ValidatorStore}; /// Builds an `AttestationService`. #[derive(Default)] @@ -560,12 +560,12 @@ impl AttestationService AttestationService(attestation_data.slot); - let single_attestations = safe_attestations - .iter() - .filter_map(|(i, a)| { - match a.to_single_attestation_with_attester_index(*i) { - Ok(a) => Some(a), - Err(e) => { - // This shouldn't happen unless BN and VC are out of sync with - // respect to the Electra fork. - error!( - error = ?e, + // Publish each batch as it arrives from the stream. 
+ let mut received_non_empty_batch = false; + while let Some(result) = attestation_stream.next().await { + match result { + Ok(batch) if !batch.is_empty() => { + received_non_empty_batch = true; + + let single_attestations = batch + .iter() + .filter_map(|(attester_index, attestation)| { + match attestation + .to_single_attestation_with_attester_index(*attester_index) + { + Ok(single_attestation) => Some(single_attestation), + Err(e) => { + // This shouldn't happen unless BN and VC are out of sync with + // respect to the Electra fork. + error!( + error = ?e, + committee_index = attestation_data.index, + slot = slot.as_u64(), + "type" = "unaggregated", + "Unable to convert to SingleAttestation" + ); + None + } + } + }) + .collect::>(); + let single_attestations = &single_attestations; + let validator_indices = single_attestations + .iter() + .map(|att| att.attester_index) + .collect::>(); + let published_count = single_attestations.len(); + + // Post the attestations to the BN. + match self + .beacon_nodes + .request(ApiTopic::Attestations, |beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS_HTTP_POST], + ); + + beacon_node + .post_beacon_pool_attestations_v2::( + single_attestations.clone(), + fork_name, + ) + .await + }) + .instrument(info_span!("publish_attestations", count = published_count)) + .await + { + Ok(()) => info!( + count = published_count, + validator_indices = ?validator_indices, + head_block = ?attestation_data.beacon_block_root, + committee_index = attestation_data.index, + slot = attestation_data.slot.as_u64(), + "type" = "unaggregated", + "Successfully published attestations" + ), + Err(e) => error!( + error = %e, committee_index = attestation_data.index, slot = slot.as_u64(), "type" = "unaggregated", - "Unable to convert to SingleAttestation" - ); - None + "Unable to publish attestations" + ), } } - }) - .collect::>(); - let 
single_attestations = &single_attestations; - let validator_indices = single_attestations - .iter() - .map(|att| att.attester_index) - .collect::>(); - let published_count = single_attestations.len(); + Err(e) => { + crit!(error = ?e, "Failed to sign attestations"); + } + _ => {} + } + } - // Post the attestations to the BN. - match self - .beacon_nodes - .request(ApiTopic::Attestations, |beacon_node| async move { - let _timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::ATTESTATIONS_HTTP_POST], - ); - - beacon_node - .post_beacon_pool_attestations_v2::( - single_attestations.clone(), - fork_name, - ) - .await - }) - .instrument(info_span!("publish_attestations", count = published_count)) - .await - { - Ok(()) => info!( - count = published_count, - validator_indices = ?validator_indices, - head_block = ?attestation_data.beacon_block_root, - committee_index = attestation_data.index, - slot = attestation_data.slot.as_u64(), - "type" = "unaggregated", - "Successfully published attestations" - ), - Err(e) => error!( - error = %e, - committee_index = attestation_data.index, - slot = slot.as_u64(), - "type" = "unaggregated", - "Unable to publish attestations" - ), + if !received_non_empty_batch { + warn!("No attestations were published"); } Ok(()) @@ -725,113 +737,103 @@ impl AttestationService(attestation_data, &self.chain_spec) { - crit!("Inconsistent validator duties during signing"); - return None; - } - - match self - .validator_store - .produce_signed_aggregate_and_proof( - duty.pubkey, - duty.validator_index, - aggregated_attestation.clone(), - selection_proof.clone(), - ) - .await - { - Ok(aggregate) => Some(aggregate), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently - // removed via the API. 
- debug!(?pubkey, "Missing pubkey for aggregate"); - None - } - Err(e) => { - crit!( - error = ?e, - pubkey = ?duty.pubkey, - "Failed to sign aggregate" - ); - None - } - } - }); - - // Execute all the futures in parallel, collecting any successful results. - let aggregator_count = validator_duties + // Build the batch of aggregates to sign. + let aggregates_to_sign: Vec<_> = validator_duties .iter() - .filter(|d| d.selection_proof.is_some()) - .count(); - let signed_aggregate_and_proofs = join_all(signing_futures) - .instrument(info_span!("sign_aggregates", count = aggregator_count)) - .await - .into_iter() - .flatten() - .collect::>(); + .filter_map(|duty_and_proof| { + let duty = &duty_and_proof.duty; + let selection_proof = duty_and_proof.selection_proof.as_ref()?; - if !signed_aggregate_and_proofs.is_empty() { - let signed_aggregate_and_proofs_slice = signed_aggregate_and_proofs.as_slice(); - match self - .beacon_nodes - .first_success(|beacon_node| async move { - let _timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::AGGREGATES_HTTP_POST], - ); - if fork_name.electra_enabled() { - beacon_node - .post_validator_aggregate_and_proof_v2( - signed_aggregate_and_proofs_slice, - fork_name, - ) - .await - } else { - beacon_node - .post_validator_aggregate_and_proof_v1( - signed_aggregate_and_proofs_slice, - ) - .await - } + if !duty.match_attestation_data::(attestation_data, &self.chain_spec) { + crit!("Inconsistent validator duties during signing"); + return None; + } + + Some(AggregateToSign { + pubkey: duty.pubkey, + aggregator_index: duty.validator_index, + aggregate: aggregated_attestation.clone(), + selection_proof: selection_proof.clone(), }) - .instrument(info_span!( - "publish_aggregates", - count = signed_aggregate_and_proofs.len() - )) - .await - { - Ok(()) => { - for signed_aggregate_and_proof in signed_aggregate_and_proofs { - let attestation = 
signed_aggregate_and_proof.message().aggregate(); - info!( - aggregator = signed_aggregate_and_proof.message().aggregator_index(), - signatures = attestation.num_set_aggregation_bits(), - head_block = format!("{:?}", attestation.data().beacon_block_root), - committee_index = attestation.committee_index(), - slot = attestation.data().slot.as_u64(), - "type" = "aggregated", - "Successfully published attestation" - ); + }) + .collect(); + + // Sign aggregates. Returns a stream of batches. + let aggregate_stream = self + .validator_store + .sign_aggregate_and_proofs(aggregates_to_sign); + tokio::pin!(aggregate_stream); + + // Publish each batch as it arrives from the stream. + while let Some(result) = aggregate_stream.next().await { + match result { + Ok(batch) if !batch.is_empty() => { + let signed_aggregate_and_proofs = batch.as_slice(); + match self + .beacon_nodes + .first_success(|beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::AGGREGATES_HTTP_POST], + ); + if fork_name.electra_enabled() { + beacon_node + .post_validator_aggregate_and_proof_v2( + signed_aggregate_and_proofs, + fork_name, + ) + .await + } else { + beacon_node + .post_validator_aggregate_and_proof_v1( + signed_aggregate_and_proofs, + ) + .await + } + }) + .instrument(info_span!( + "publish_aggregates", + count = signed_aggregate_and_proofs.len() + )) + .await + { + Ok(()) => { + for signed_aggregate_and_proof in signed_aggregate_and_proofs { + let attestation = signed_aggregate_and_proof.message().aggregate(); + info!( + aggregator = + signed_aggregate_and_proof.message().aggregator_index(), + signatures = attestation.num_set_aggregation_bits(), + head_block = + format!("{:?}", attestation.data().beacon_block_root), + committee_index = attestation.committee_index(), + slot = attestation.data().slot.as_u64(), + "type" = "aggregated", + "Successfully published attestation" + ); + } + } + Err(e) => { + 
for signed_aggregate_and_proof in signed_aggregate_and_proofs { + let attestation = &signed_aggregate_and_proof.message().aggregate(); + crit!( + error = %e, + aggregator = signed_aggregate_and_proof + .message() + .aggregator_index(), + committee_index = attestation.committee_index(), + slot = attestation.data().slot.as_u64(), + "type" = "aggregated", + "Failed to publish attestation" + ); + } + } } } Err(e) => { - for signed_aggregate_and_proof in signed_aggregate_and_proofs { - let attestation = &signed_aggregate_and_proof.message().aggregate(); - crit!( - error = %e, - aggregator = signed_aggregate_and_proof.message().aggregator_index(), - committee_index = attestation.committee_index(), - slot = attestation.data().slot.as_u64(), - "type" = "aggregated", - "Failed to publish attestation" - ); - } + crit!(error = ?e, "Failed to sign aggregates"); } + _ => {} } } diff --git a/validator_client/validator_services/src/sync_committee_service.rs b/validator_client/validator_services/src/sync_committee_service.rs index 59e8524a1a..26ce052ea0 100644 --- a/validator_client/validator_services/src/sync_committee_service.rs +++ b/validator_client/validator_services/src/sync_committee_service.rs @@ -2,8 +2,8 @@ use crate::duties_service::DutiesService; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use bls::PublicKeyBytes; use eth2::types::BlockId; +use futures::StreamExt; use futures::future::FutureExt; -use futures::future::join_all; use logging::crit; use slot_clock::SlotClock; use std::collections::HashMap; @@ -17,7 +17,7 @@ use types::{ ChainSpec, EthSpec, Hash256, Slot, SyncCommitteeSubscription, SyncContributionData, SyncDuty, SyncSelectionProof, SyncSubnetId, }; -use validator_store::{Error as ValidatorStoreError, ValidatorStore}; +use validator_store::{ContributionToSign, SyncMessageToSign, ValidatorStore}; pub const SUBSCRIPTION_LOOKAHEAD_EPOCHS: u64 = 4; @@ -247,78 +247,57 @@ impl SyncCommitteeService, ) -> Result<(), ()> { - // Create futures to 
produce sync committee signatures. - let signature_futures = validator_duties.iter().map(|duty| async move { - match self - .validator_store - .produce_sync_committee_signature( - slot, - beacon_block_root, - duty.validator_index, - &duty.pubkey, - ) - .await - { - Ok(signature) => Some(signature), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently - // removed via the API. - debug!( - ?pubkey, - validator_index = duty.validator_index, - %slot, - "Missing pubkey for sync committee signature" - ); - None + let messages_to_sign: Vec<_> = validator_duties + .iter() + .map(|duty| SyncMessageToSign { + slot, + beacon_block_root, + validator_index: duty.validator_index, + pubkey: duty.pubkey, + }) + .collect(); + + let signature_stream = self + .validator_store + .sign_sync_committee_signatures(messages_to_sign); + tokio::pin!(signature_stream); + + while let Some(result) = signature_stream.next().await { + match result { + Ok(committee_signatures) if !committee_signatures.is_empty() => { + let committee_signatures = &committee_signatures; + match self + .beacon_nodes + .request(ApiTopic::SyncCommittee, |beacon_node| async move { + beacon_node + .post_beacon_pool_sync_committee_signatures(committee_signatures) + .await + }) + .instrument(info_span!( + "publish_sync_signatures", + count = committee_signatures.len() + )) + .await + { + Ok(()) => info!( + count = committee_signatures.len(), + head_block = ?beacon_block_root, + %slot, + "Successfully published sync committee messages" + ), + Err(e) => error!( + %slot, + error = %e, + "Unable to publish sync committee messages" + ), + } } Err(e) => { - crit!( - validator_index = duty.validator_index, - %slot, - error = ?e, - "Failed to sign sync committee signature" - ); - None + crit!(%slot, error = ?e, "Failed to sign sync committee signatures"); } + _ => {} } - }); - - // Execute all the futures in parallel, collecting any successful results. 
- let committee_signatures = &join_all(signature_futures) - .instrument(info_span!( - "sign_sync_signatures", - count = validator_duties.len() - )) - .await - .into_iter() - .flatten() - .collect::>(); - - self.beacon_nodes - .request(ApiTopic::SyncCommittee, |beacon_node| async move { - beacon_node - .post_beacon_pool_sync_committee_signatures(committee_signatures) - .await - }) - .instrument(info_span!( - "publish_sync_signatures", - count = committee_signatures.len() - )) - .await - .map_err(|e| { - error!( - %slot, - error = %e, - "Unable to publish sync committee messages" - ); - })?; - - info!( - count = committee_signatures.len(), - head_block = ?beacon_block_root, - %slot, - "Successfully published sync committee messages" - ); + } Ok(()) } @@ -389,77 +368,61 @@ impl SyncCommitteeService Some(signed_contribution), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently - // removed via the API. - debug!(?pubkey, %slot, "Missing pubkey for sync contribution"); - None - } - Err(e) => { - crit!( + let contributions_to_sign: Vec<_> = subnet_aggregators + .into_iter() + .map( + |(aggregator_index, aggregator_pk, selection_proof)| ContributionToSign { + aggregator_index, + aggregator_pubkey: aggregator_pk, + contribution: contribution.clone(), + selection_proof, + }, + ) + .collect(); + + let contribution_stream = self + .validator_store + .sign_sync_committee_contributions(contributions_to_sign); + tokio::pin!(contribution_stream); + + while let Some(result) = contribution_stream.next().await { + match result { + Ok(signed_contributions) if !signed_contributions.is_empty() => { + let signed_contributions = &signed_contributions; + // Publish to the beacon node. 
+ match self + .beacon_nodes + .first_success(|beacon_node| async move { + beacon_node + .post_validator_contribution_and_proofs(signed_contributions) + .await + }) + .instrument(info_span!( + "publish_sync_contributions", + count = signed_contributions.len() + )) + .await + { + Ok(()) => info!( + subnet = %subnet_id, + beacon_block_root = %beacon_block_root, + num_signers = contribution.aggregation_bits.num_set_bits(), %slot, - error = ?e, - "Unable to sign sync committee contribution" - ); - None + "Successfully published sync contributions" + ), + Err(e) => error!( + %slot, + error = %e, + "Unable to publish signed contributions and proofs" + ), } } - }, - ); - - // Execute all the futures in parallel, collecting any successful results. - let signed_contributions = &join_all(signature_futures) - .instrument(info_span!( - "sign_sync_contributions", - count = aggregator_count - )) - .await - .into_iter() - .flatten() - .collect::>(); - - // Publish to the beacon node. - self.beacon_nodes - .first_success(|beacon_node| async move { - beacon_node - .post_validator_contribution_and_proofs(signed_contributions) - .await - }) - .instrument(info_span!( - "publish_sync_contributions", - count = signed_contributions.len() - )) - .await - .map_err(|e| { - error!( - %slot, - error = %e, - "Unable to publish signed contributions and proofs" - ); - })?; - - info!( - subnet = %subnet_id, - beacon_block_root = %beacon_block_root, - num_signers = contribution.aggregation_bits.num_set_bits(), - %slot, - "Successfully published sync contributions" - ); + Err(e) => { + crit!(%slot, error = ?e, "Failed to sign sync committee contributions"); + } + _ => {} + } + } Ok(()) } diff --git a/validator_client/validator_store/Cargo.toml b/validator_client/validator_store/Cargo.toml index 8b1879c837..2c6a68d494 100644 --- a/validator_client/validator_store/Cargo.toml +++ b/validator_client/validator_store/Cargo.toml @@ -7,5 +7,6 @@ authors = ["Sigma Prime "] [dependencies] bls = { workspace = 
true } eth2 = { workspace = true } +futures = { workspace = true } slashing_protection = { workspace = true } types = { workspace = true } diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 87ab669e8d..da0b33de18 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -1,5 +1,6 @@ use bls::{PublicKeyBytes, Signature}; use eth2::types::{FullBlockContents, PublishBlockRequest}; +use futures::Stream; use slashing_protection::NotSafe; use std::fmt::Debug; use std::future::Future; @@ -32,6 +33,38 @@ impl From for Error { } } +/// Input for batch attestation signing +pub struct AttestationToSign { + pub validator_index: u64, + pub pubkey: PublicKeyBytes, + pub validator_committee_index: usize, + pub attestation: Attestation, +} + +/// Input for batch aggregate signing +pub struct AggregateToSign { + pub pubkey: PublicKeyBytes, + pub aggregator_index: u64, + pub aggregate: Attestation, + pub selection_proof: SelectionProof, +} + +/// Input for batch sync committee message signing +pub struct SyncMessageToSign { + pub slot: Slot, + pub beacon_block_root: Hash256, + pub validator_index: u64, + pub pubkey: PublicKeyBytes, +} + +/// Input for batch sync committee contribution signing +pub struct ContributionToSign { + pub aggregator_index: u64, + pub aggregator_pubkey: PublicKeyBytes, + pub contribution: SyncCommitteeContribution, + pub selection_proof: SyncSelectionProof, +} + /// A helper struct, used for passing data from the validator store to services. pub struct ProposalData { pub validator_index: Option, @@ -106,13 +139,9 @@ pub trait ValidatorStore: Send + Sync { /// Sign a batch of `attestations` and apply slashing protection to them. /// - /// Only successfully signed attestations that pass slashing protection are returned, along with - /// the validator index of the signer. Eventually this will be replaced by `SingleAttestation` - /// use. 
- /// - /// Input: - /// - /// * Vec of (validator_index, pubkey, validator_committee_index, attestation). + /// Returns a stream of batches of successfully signed attestations. Each batch contains + /// attestations that passed slashing protection, along with the validator index of the signer. + /// Eventually this will be replaced by `SingleAttestation` use. /// /// Output: /// @@ -120,26 +149,14 @@ pub trait ValidatorStore: Send + Sync { #[allow(clippy::type_complexity)] fn sign_attestations( self: &Arc, - attestations: Vec<(u64, PublicKeyBytes, usize, Attestation)>, - ) -> impl Future)>, Error>> + Send; + attestations: Vec>, + ) -> impl Stream)>, Error>> + Send; fn sign_validator_registration_data( &self, validator_registration_data: ValidatorRegistrationData, ) -> impl Future>> + Send; - /// Signs an `AggregateAndProof` for a given validator. - /// - /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be - /// modified by actors other than the signing validator. - fn produce_signed_aggregate_and_proof( - &self, - validator_pubkey: PublicKeyBytes, - aggregator_index: u64, - aggregate: Attestation, - selection_proof: SelectionProof, - ) -> impl Future, Error>> + Send; - /// Produces a `SelectionProof` for the `slot`, signed by with corresponding secret key to /// `validator_pubkey`. fn produce_selection_proof( @@ -156,21 +173,23 @@ pub trait ValidatorStore: Send + Sync { subnet_id: SyncSubnetId, ) -> impl Future>> + Send; - fn produce_sync_committee_signature( - &self, - slot: Slot, - beacon_block_root: Hash256, - validator_index: u64, - validator_pubkey: &PublicKeyBytes, - ) -> impl Future>> + Send; + /// Sign a batch of aggregate and proofs and return results as a stream of batches. 
+ fn sign_aggregate_and_proofs( + self: &Arc, + aggregates: Vec>, + ) -> impl Stream>, Error>> + Send; - fn produce_signed_contribution_and_proof( - &self, - aggregator_index: u64, - aggregator_pubkey: PublicKeyBytes, - contribution: SyncCommitteeContribution, - selection_proof: SyncSelectionProof, - ) -> impl Future, Error>> + Send; + /// Sign a batch of sync committee messages and return results as a stream of batches. + fn sign_sync_committee_signatures( + self: &Arc, + messages: Vec, + ) -> impl Stream, Error>> + Send; + + /// Sign a batch of sync committee contributions and return results as a stream of batches. + fn sign_sync_committee_contributions( + self: &Arc, + contributions: Vec>, + ) -> impl Stream>, Error>> + Send; /// Prune the slashing protection database so that it remains performant. /// diff --git a/wordlist.txt b/wordlist.txt index e0e1fe7d73..822e336146 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -58,6 +58,7 @@ JSON KeyManager Kurtosis LMDB +LLM LLVM LRU LTO