diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a8919337a9..cdec442276 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,3 @@ /beacon_node/network/ @jxs /beacon_node/lighthouse_network/ @jxs +/beacon_node/store/ @michaelsproul diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 1cd2f24548..5cffb4e2fd 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -20,7 +20,7 @@ jobs: - name: Build Docker image run: | - docker build --build-arg FEATURES=portable -t lighthouse:local . + docker build --build-arg FEATURES=portable,spec-minimal -t lighthouse:local . docker save lighthouse:local -o lighthouse-docker.tar - name: Upload Docker image artifact @@ -52,23 +52,22 @@ jobs: - name: Load Docker image run: docker load -i lighthouse-docker.tar - - name: Start local testnet - run: ./start_local_testnet.sh -e local -c -b false && sleep 60 + - name: Start local testnet with Assertoor + run: ./start_local_testnet.sh -e local-assertoor -c -a -b false && sleep 60 working-directory: scripts/local_testnet + - name: Await Assertoor test result + id: assertoor_test_result + uses: ethpandaops/assertoor-github-action@v1 + with: + kurtosis_enclave_name: local-assertoor + - name: Stop local testnet and dump logs - run: ./stop_local_testnet.sh local - working-directory: scripts/local_testnet - - - name: Start local testnet with blinded block production - run: ./start_local_testnet.sh -e local-blinded -c -p -b false && sleep 60 - working-directory: scripts/local_testnet - - - name: Stop local testnet and dump logs - run: ./stop_local_testnet.sh local-blinded + run: ./stop_local_testnet.sh local-assertoor working-directory: scripts/local_testnet - name: Upload logs artifact + if: always() uses: actions/upload-artifact@v4 with: name: logs-local-testnet @@ -76,6 +75,29 @@ jobs: scripts/local_testnet/logs retention-days: 3 + - name: Return Assertoor test result + shell: bash + run: | + 
test_result="${{ steps.assertoor_test_result.outputs.result }}" + test_status=$( + cat <<"EOF" + ${{ steps.assertoor_test_result.outputs.test_overview }} + EOF + ) + failed_test_status=$( + cat <<"EOF" + ${{ steps.assertoor_test_result.outputs.failed_test_details }} + EOF + ) + + echo "Test Result: $test_result" + echo "$test_status" + if ! [ "$test_result" == "success" ]; then + echo "Failed Test Task Status:" + echo "$failed_test_status" + exit 1 + fi + doppelganger-protection-success-test: needs: dockerfile-ubuntu runs-on: ubuntu-22.04 @@ -104,6 +126,7 @@ jobs: working-directory: scripts/tests - name: Upload logs artifact + if: always() uses: actions/upload-artifact@v4 with: name: logs-doppelganger-protection-success @@ -139,6 +162,7 @@ jobs: working-directory: scripts/tests - name: Upload logs artifact + if: always() uses: actions/upload-artifact@v4 with: name: logs-doppelganger-protection-failure @@ -146,6 +170,91 @@ jobs: scripts/local_testnet/logs retention-days: 3 + # Tests checkpoint syncing to a live network (current fork) and a running devnet (usually next scheduled fork) + checkpoint-sync-test: + name: checkpoint-sync-test-${{ matrix.network }} + runs-on: ubuntu-latest + needs: dockerfile-ubuntu + if: contains(github.event.pull_request.labels.*.name, 'syncing') + continue-on-error: true + strategy: + matrix: + network: [sepolia, devnet] + steps: + - uses: actions/checkout@v4 + + - name: Install Kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install -y kurtosis-cli + kurtosis analytics disable + + - name: Download Docker image artifact + uses: actions/download-artifact@v4 + with: + name: lighthouse-docker + path: . 
+ + - name: Load Docker image + run: docker load -i lighthouse-docker.tar + + - name: Run the checkpoint sync test script + run: | + ./checkpoint-sync.sh "sync-${{ matrix.network }}" "checkpoint-sync-config-${{ matrix.network }}.yaml" + working-directory: scripts/tests + + - name: Upload logs artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: logs-checkpoint-sync-${{ matrix.network }} + path: | + scripts/local_testnet/logs + retention-days: 3 + + # Test syncing from genesis on a local testnet. Aims to cover forward syncing both short and long distances. + genesis-sync-test: + name: genesis-sync-test-${{ matrix.fork }}-${{ matrix.offline_secs }}s + runs-on: ubuntu-latest + needs: dockerfile-ubuntu + if: contains(github.event.pull_request.labels.*.name, 'syncing') + strategy: + matrix: + fork: [electra, fulu] + offline_secs: [120, 300] + steps: + - uses: actions/checkout@v4 + + - name: Install Kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install -y kurtosis-cli + kurtosis analytics disable + + - name: Download Docker image artifact + uses: actions/download-artifact@v4 + with: + name: lighthouse-docker + path: . + + - name: Load Docker image + run: docker load -i lighthouse-docker.tar + + - name: Run the genesis sync test script + run: | + ./genesis-sync.sh "sync-${{ matrix.fork }}-${{ matrix.offline_secs }}s" "genesis-sync-config-${{ matrix.fork }}.yaml" "${{ matrix.fork }}" "${{ matrix.offline_secs }}" + working-directory: scripts/tests + + - name: Upload logs artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: logs-genesis-sync-${{ matrix.fork }}-${{ matrix.offline_secs }}s + path: | + scripts/local_testnet/logs + retention-days: 3 # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. 
@@ -161,4 +270,6 @@ jobs: steps: - uses: actions/checkout@v4 - name: Check that success job is dependent on all others - run: ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success + run: | + exclude_jobs='checkpoint-sync-test|genesis-sync-test' + ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success "$exclude_jobs" diff --git a/Cargo.lock b/Cargo.lock index 30be5fa233..7d77ce4044 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,16 +2,6 @@ # It is not intended for manual editing. version = 4 -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - [[package]] name = "account_manager" version = "0.3.5" @@ -612,6 +602,28 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "async-trait" version = "0.1.88" @@ -704,6 +716,53 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + 
"http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 1.0.2", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.75" @@ -792,7 +851,6 @@ dependencies = [ "bls", "criterion", "derivative", - "eth1", "eth2", "eth2_network_config", "ethereum_hashing", @@ -813,6 +871,8 @@ dependencies = [ "maplit", "merkle_proof", "metrics", + "mockall", + "mockall_double", "once_cell", "oneshot_broadcast", "operation_pool", @@ -882,6 +942,7 @@ dependencies = [ "eth2", "futures", "itertools 0.10.5", + "sensitive_url", "serde", "slot_clock", "strum", @@ -1251,20 +1312,6 @@ dependencies = [ "serde", ] -[[package]] -name = "cargo_metadata" -version = "0.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" -dependencies = [ - "camino", - "cargo-platform", - "semver 1.0.26", - "serde", - "serde_json", - "thiserror 1.0.69", -] - [[package]] name = "cargo_metadata" version = "0.19.2" @@ -1479,7 +1526,6 @@ dependencies = [ "directory", "dirs", "environment", - "eth1", "eth2", "eth2_config", "ethereum_ssz", @@ -1620,6 +1666,45 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "console-api" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857" +dependencies = [ + 
"futures-core", + "prost", + "prost-types", + "tonic", + "tracing-core", +] + +[[package]] +name = "console-subscriber" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01" +dependencies = [ + "console-api", + "crossbeam-channel", + "crossbeam-utils", + "futures-task", + "hdrhistogram", + "humantime", + "hyper-util", + "prost", + "prost-types", + "serde", + "serde_json", + "thread_local", + "tokio", + "tokio-stream", + "tonic", + "tracing", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "const-hex" version = "1.14.0" @@ -2376,18 +2461,18 @@ dependencies = [ "validator_store", ] +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "dtoa" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" -[[package]] -name = "dunce" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" - [[package]] name = "ecdsa" version = "0.14.8" @@ -2460,6 +2545,8 @@ dependencies = [ "bls", "compare_fields", "compare_fields_derive", + "context_deserialize", + "context_deserialize_derive", "derivative", "eth2_network_config", "ethereum_ssz", @@ -2672,48 +2759,6 @@ dependencies = [ "uuid 0.8.2", ] -[[package]] -name = "eth1" -version = "0.2.0" -dependencies = [ - "environment", - "eth1_test_rig", - "eth2", - "ethereum_ssz", - "ethereum_ssz_derive", - "execution_layer", - "futures", - "logging", - "merkle_proof", - "metrics", - "parking_lot 0.12.3", - "sensitive_url", - "serde", - "serde_yaml", - "state_processing", - "superstruct", - "task_executor", - "tokio", - "tracing", - "tree_hash", - 
"types", -] - -[[package]] -name = "eth1_test_rig" -version = "0.2.0" -dependencies = [ - "deposit_contract", - "ethers-contract", - "ethers-core", - "ethers-providers", - "hex", - "serde_json", - "tokio", - "types", - "unused_port", -] - [[package]] name = "eth2" version = "0.1.0" @@ -2996,8 +3041,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9c3c3e119a89f0a9a1e539e7faecea815f74ddcf7c90d0b00d1f524db2fdc9c" dependencies = [ - "ethers-contract-abigen", - "ethers-contract-derive", "ethers-core", "ethers-providers", "futures-util", @@ -3009,46 +3052,6 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "ethers-contract-abigen" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d4e5ad46aede34901f71afdb7bb555710ed9613d88d644245c657dc371aa228" -dependencies = [ - "Inflector", - "cfg-if", - "dunce", - "ethers-core", - "eyre", - "getrandom 0.2.16", - "hex", - "proc-macro2", - "quote", - "regex", - "reqwest", - "serde", - "serde_json", - "syn 1.0.109", - "toml", - "url", - "walkdir", -] - -[[package]] -name = "ethers-contract-derive" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f192e8e4cf2b038318aae01e94e7644e0659a76219e94bcd3203df744341d61f" -dependencies = [ - "ethers-contract-abigen", - "ethers-core", - "hex", - "proc-macro2", - "quote", - "serde_json", - "syn 1.0.109", -] - [[package]] name = "ethers-core" version = "1.0.2" @@ -3057,7 +3060,6 @@ checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" dependencies = [ "arrayvec", "bytes", - "cargo_metadata 0.15.4", "chrono", "convert_case 0.6.0", "elliptic-curve 0.12.3", @@ -3065,7 +3067,6 @@ dependencies = [ "generic-array 0.14.7", "hex", "k256 0.11.6", - "once_cell", "open-fastrlp", "proc-macro2", "rand 0.8.5", @@ -3291,16 +3292,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "eyre" -version = "0.6.12" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" -dependencies = [ - "indenter", - "once_cell", -] - [[package]] name = "fake-simd" version = "0.1.2" @@ -3504,6 +3495,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" + [[package]] name = "fs2" version = "0.4.3" @@ -3709,19 +3706,12 @@ dependencies = [ name = "genesis" version = "0.2.0" dependencies = [ - "environment", - "eth1", - "eth1_test_rig", "ethereum_hashing", "ethereum_ssz", - "futures", "int_to_bytes", - "logging", "merkle_proof", "rayon", - "sensitive_url", "state_processing", - "tokio", "tracing", "tree_hash", "types", @@ -3950,6 +3940,19 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "hdrhistogram" +version = "7.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" +dependencies = [ + "base64 0.21.7", + "byteorder", + "flate2", + "nom", + "num-traits", +] + [[package]] name = "headers" version = "0.3.9" @@ -4188,7 +4191,6 @@ dependencies = [ "bytes", "directory", "either", - "eth1", "eth2", "ethereum_serde_utils", "ethereum_ssz", @@ -4325,6 +4327,19 @@ dependencies = [ "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper 1.6.0", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -4658,12 +4673,6 @@ dependencies = [ "syn 2.0.101", ] -[[package]] -name = "indenter" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" - [[package]] name = "indexmap" version = "1.9.3" @@ -5482,10 +5491,10 @@ dependencies = [ "boot_node", "clap", "clap_utils", + "console-subscriber", "database_manager", "directory", "environment", - "eth1", "eth2", "eth2_network_config", "ethereum_hashing", @@ -5787,6 +5796,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "mdbx-sys" version = "0.11.6-4" @@ -5932,6 +5947,44 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9366861eb2a2c436c20b12c8dbec5f798cea6b47ad99216be0282942e2c81ea0" +[[package]] +name = "mockall" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "mockall_double" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1ca96e5ac35256ae3e13536edd39b172b88f41615e1d7b653c8ad24524113e8" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "mockito" version = "1.7.0" @@ -6898,6 +6951,32 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "predicates" +version = "3.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +dependencies = [ + "anstyle", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" + +[[package]] +name = "predicates-tree" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "pretty_reqwest_error" version = "0.1.0" @@ -7087,6 +7166,38 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.13.0", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + [[package]] name = "proto_array" version = "0.2.0" @@ -7491,7 +7602,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration 0.5.1", "tokio", "tokio-native-tls", @@ -8391,9 +8502,7 @@ name = "simulator" version = "0.2.0" dependencies = [ "clap", - "env_logger 0.9.3", "environment", - "eth2_network_config", "execution_layer", "futures", "kzg", @@ -8755,6 +8864,12 @@ 
version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + [[package]] name = "synstructure" version = "0.13.2" @@ -8902,6 +9017,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + [[package]] name = "test_random_derive" version = "0.2.0" @@ -9120,6 +9241,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", + "tracing", "windows-sys 0.52.0", ] @@ -9202,15 +9324,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - [[package]] name = "toml_datetime" version = "0.6.9" @@ -9239,6 +9352,76 @@ dependencies = [ "winnow 0.7.10", ] +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2 0.4.10", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + [[package]] name = "tower-service" version = "0.3.3" @@ -10563,7 +10746,7 @@ dependencies = [ name = "workspace_members" version = "0.1.0" dependencies = [ - "cargo_metadata 0.19.2", + "cargo_metadata", "quote", ] diff --git a/Cargo.toml b/Cargo.toml index 9d7407d9ee..6a7b2f610e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,6 @@ members = [ "beacon_node/beacon_processor", "beacon_node/builder_client", "beacon_node/client", - "beacon_node/eth1", "beacon_node/execution_layer", "beacon_node/genesis", "beacon_node/http_api", @@ -72,7 +71,6 @@ members = [ "slasher", "slasher/service", "testing/ef_tests", - "testing/eth1_test_rig", "testing/execution_engine_integration", "testing/node_test_rig", "testing/simulator", @@ -124,6 +122,7 @@ clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } clap_utils = { path = "common/clap_utils" } compare_fields = { path = "common/compare_fields" } compare_fields_derive = { path = "common/compare_fields_derive" } +console-subscriber = "0.4" context_deserialize = { path = "consensus/context_deserialize" } context_deserialize_derive = { path = "consensus/context_deserialize_derive" } criterion = "0.5" @@ 
-137,8 +136,6 @@ doppelganger_service = { path = "validator_client/doppelganger_service" } either = "1.9" env_logger = "0.9" environment = { path = "lighthouse/environment" } -eth1 = { path = "beacon_node/eth1" } -eth1_test_rig = { path = "testing/eth1_test_rig" } eth2 = { path = "common/eth2" } eth2_config = { path = "common/eth2_config" } eth2_key_derivation = { path = "crypto/eth2_key_derivation" } @@ -188,6 +185,8 @@ maplit = "1" merkle_proof = { path = "consensus/merkle_proof" } metrics = { path = "common/metrics" } milhouse = "0.5" +mockall = "0.13" +mockall_double = "0.3" mockito = "1.5.0" monitoring_api = { path = "common/monitoring_api" } network = { path = "beacon_node/network" } diff --git a/Makefile b/Makefile index fe5dfbe551..75b6811b74 100644 --- a/Makefile +++ b/Makefile @@ -218,6 +218,9 @@ run-state-transition-tests: # Downloads and runs the EF test vectors. test-ef: make-ef-tests run-ef-tests +# Downloads and runs the nightly EF test vectors. +test-ef-nightly: make-ef-tests-nightly run-ef-tests + # Downloads and runs the EF test vectors with nextest. nextest-ef: make-ef-tests nextest-run-ef-tests @@ -278,6 +281,10 @@ lint-full: make-ef-tests: make -C $(EF_TESTS) +# Download/extract the nightly EF test vectors. +make-ef-tests-nightly: + CONSENSUS_SPECS_TEST_VERSION=nightly make -C $(EF_TESTS) + # Verifies that crates compile with fuzzing features enabled arbitrary-fuzz: cargo check -p state_processing --features arbitrary-fuzz,$(TEST_FEATURES) diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 4d2353b553..b985484d11 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -32,7 +32,7 @@ pub fn cli_app() -> Command { .about( "Imports one or more EIP-2335 passwords into a Lighthouse VC directory, \ requesting passwords interactively. 
The directory flag provides a convenient \ - method for importing a directory of keys generated by the eth2-deposit-cli \ + method for importing a directory of keys generated by the ethstaker-deposit-cli \ Python utility.", ) .arg( diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index bcd860a484..57d532d0ae 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -90,7 +90,7 @@ pub fn cli_run( let slashing_protection_database = SlashingDatabase::open_or_create(&slashing_protection_db_path).map_err(|e| { format!( - "Unable to open database at {}: {:?}", + "Unable to open slashing protection database at {}: {:?}", slashing_protection_db_path.display(), e ) @@ -198,7 +198,7 @@ pub fn cli_run( let slashing_protection_database = SlashingDatabase::open(&slashing_protection_db_path) .map_err(|e| { format!( - "Unable to open database at {}: {:?}", + "Unable to open slashing protection database at {}: {:?}", slashing_protection_db_path.display(), e ) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index bbe7fad6af..fbc58eafc8 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -1,3 +1,4 @@ + [package] name = "beacon_chain" version = "0.2.0" @@ -18,7 +19,6 @@ alloy-primitives = { workspace = true } bitvec = { workspace = true } bls = { workspace = true } derivative = { workspace = true } -eth1 = { workspace = true } eth2 = { workspace = true } eth2_network_config = { workspace = true } ethereum_hashing = { workspace = true } @@ -69,6 +69,8 @@ types = { workspace = true } [dev-dependencies] criterion = { workspace = true } maplit = { workspace = true } +mockall = { workspace = true } +mockall_double = { workspace = true } serde_json = { workspace = true } [[bench]] diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs 
b/beacon_node/beacon_chain/src/attestation_verification.rs index d69667f3de..f057c0619d 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -38,6 +38,7 @@ use crate::{ metrics, observed_aggregates::{ObserveOutcome, ObservedAttestationKey}, observed_attesters::Error as ObservedAttestersError, + single_attestation::single_attestation_to_attestation, BeaconChain, BeaconChainError, BeaconChainTypes, }; use bls::verify_signature_sets; @@ -202,12 +203,6 @@ pub enum Error { /// /// The peer has sent an invalid message. NoCommitteeForSlotAndIndex { slot: Slot, index: CommitteeIndex }, - /// The unaggregated attestation doesn't have only one aggregation bit set. - /// - /// ## Peer scoring - /// - /// The peer has sent an invalid message. - NotExactlyOneAggregationBitSet(usize), /// The attestation doesn't have only one aggregation bit set. /// /// ## Peer scoring @@ -304,9 +299,9 @@ struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> { /// /// These attestations have *not* undergone signature verification. struct IndexedUnaggregatedAttestation<'a, T: BeaconChainTypes> { - attestation: AttestationRef<'a, T::EthSpec>, + attestation: &'a SingleAttestation, indexed_attestation: IndexedAttestation, - subnet_id: SubnetId, + subnet_id: Option, validator_index: u64, } @@ -323,12 +318,13 @@ impl VerifiedAggregatedAttestation<'_, T> { } } +#[derive(Clone)] /// Wraps an `Attestation` that has been fully verified for propagation on the gossip network. 
pub struct VerifiedUnaggregatedAttestation<'a, T: BeaconChainTypes> { - attestation: AttestationRef<'a, T::EthSpec>, + attestation: Attestation, + single_attestation: &'a SingleAttestation, indexed_attestation: IndexedAttestation, subnet_id: SubnetId, - validator_index: usize, } impl VerifiedUnaggregatedAttestation<'_, T> { @@ -336,13 +332,8 @@ impl VerifiedUnaggregatedAttestation<'_, T> { self.indexed_attestation } - pub fn single_attestation(&self) -> Option { - Some(SingleAttestation { - committee_index: self.attestation.committee_index()?, - attester_index: self.validator_index as u64, - data: self.attestation.data().clone(), - signature: self.attestation.signature().clone(), - }) + pub fn single_attestation(&self) -> SingleAttestation { + self.single_attestation.clone() } } @@ -362,7 +353,7 @@ impl Clone for IndexedUnaggregatedAttestation<'_, T> { /// A helper trait implemented on wrapper types that can be progressed to a state where they can be /// verified for application to fork choice. 
pub trait VerifiedAttestation: Sized { - fn attestation(&self) -> AttestationRef; + fn attestation(&self) -> AttestationRef<'_, T::EthSpec>; fn indexed_attestation(&self) -> &IndexedAttestation; @@ -375,7 +366,7 @@ pub trait VerifiedAttestation: Sized { } impl VerifiedAttestation for VerifiedAggregatedAttestation<'_, T> { - fn attestation(&self) -> AttestationRef { + fn attestation(&self) -> AttestationRef<'_, T::EthSpec> { self.attestation() } @@ -385,8 +376,8 @@ impl VerifiedAttestation for VerifiedAggregatedAttestati } impl VerifiedAttestation for VerifiedUnaggregatedAttestation<'_, T> { - fn attestation(&self) -> AttestationRef { - self.attestation + fn attestation(&self) -> AttestationRef<'_, T::EthSpec> { + self.attestation.to_ref() } fn indexed_attestation(&self) -> &IndexedAttestation { @@ -400,6 +391,8 @@ pub enum AttestationSlashInfo<'a, T: BeaconChainTypes, TErr> { SignatureNotChecked(AttestationRef<'a, T::EthSpec>, TErr), /// As for `SignatureNotChecked`, but we know the `IndexedAttestation`. SignatureNotCheckedIndexed(IndexedAttestation, TErr), + /// As for `SignatureNotChecked`, but for the `SingleAttestation`. + SignatureNotCheckedSingle(&'a SingleAttestation, TErr), /// The attestation's signature is invalid, so it will never be slashable. SignatureInvalid(TErr), /// The signature is valid but the attestation is invalid in some other way. @@ -438,6 +431,20 @@ fn process_slash_info( } } } + SignatureNotCheckedSingle(attestation, err) => { + if let Error::UnknownHeadBlock { .. 
} = err { + if attestation.data.beacon_block_root == attestation.data.target.root { + return err; + } + } + + let fork_name = chain + .spec + .fork_name_at_slot::(attestation.data.slot); + + let indexed_attestation = attestation.to_indexed(fork_name); + (indexed_attestation, true, err) + } SignatureNotCheckedIndexed(indexed, err) => (indexed, true, err), SignatureInvalid(e) => return e, SignatureValid(indexed, err) => (indexed, false, err), @@ -461,6 +468,7 @@ fn process_slash_info( match slash_info { SignatureNotChecked(_, e) | SignatureNotCheckedIndexed(_, e) + | SignatureNotCheckedSingle(_, e) | SignatureInvalid(e) | SignatureValid(_, e) => e, } @@ -561,7 +569,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { // // Attestations must be for a known block. If the block is unknown, we simply drop the // attestation and do not delay consideration for later. - let head_block = verify_head_block_is_known(chain, attestation, None)?; + let head_block = verify_head_block_is_known(chain, attestation.data(), None)?; // Check the attestation target root is consistent with the head root. // @@ -570,7 +578,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { // // Whilst this attestation *technically* could be used to add value to a block, it is // invalid in the spirit of the protocol. Here we choose safety over profit. - verify_attestation_target_root::(&head_block, attestation)?; + verify_attestation_target_root::(&head_block, attestation.data())?; // Ensure that the attestation has participants. if attestation.is_aggregation_bits_zero() { @@ -813,16 +821,16 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { /// Run the checks that happen before an indexed attestation is constructed. 
pub fn verify_early_checks( - attestation: AttestationRef, + attestation: &'a SingleAttestation, chain: &BeaconChain, ) -> Result<(), Error> { - let attestation_epoch = attestation.data().slot.epoch(T::EthSpec::slots_per_epoch()); + let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()); // Check the attestation's epoch matches its target. - if attestation_epoch != attestation.data().target.epoch { + if attestation_epoch != attestation.data.target.epoch { return Err(Error::InvalidTargetEpoch { - slot: attestation.data().slot, - epoch: attestation.data().target.epoch, + slot: attestation.data.slot, + epoch: attestation.data.target.epoch, }); } @@ -832,61 +840,44 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { // We do not queue future attestations for later processing. verify_propagation_slot_range::<_, T::EthSpec>( &chain.slot_clock, - attestation.data(), + &attestation.data, &chain.spec, )?; - // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one - // aggregation bit set. - let num_aggregation_bits = attestation.num_set_aggregation_bits(); - if num_aggregation_bits != 1 { - return Err(Error::NotExactlyOneAggregationBitSet(num_aggregation_bits)); + let fork_name = chain + .spec + .fork_name_at_slot::(attestation.data.slot); + if fork_name.electra_enabled() { + // [New in Electra:EIP7549] + if attestation.data.index != 0 { + return Err(Error::CommitteeIndexNonZero( + attestation.data.index as usize, + )); + } } - // [New in Electra:EIP7549] - verify_committee_index(attestation)?; - // Attestations must be for a known block. If the block is unknown, we simply drop the // attestation and do not delay consideration for later. // // Enforce a maximum skip distance for unaggregated attestations. 
- let head_block = - verify_head_block_is_known(chain, attestation, chain.config.import_max_skip_slots)?; + let head_block = verify_head_block_is_known( + chain, + &attestation.data, + chain.config.import_max_skip_slots, + )?; // Check the attestation target root is consistent with the head root. - verify_attestation_target_root::(&head_block, attestation)?; + verify_attestation_target_root::(&head_block, &attestation.data)?; Ok(()) } /// Run the checks that apply to the indexed attestation before the signature is checked. pub fn verify_middle_checks( - attestation: AttestationRef, - indexed_attestation: &IndexedAttestation, - committees_per_slot: u64, - subnet_id: Option, + attestation: &'a SingleAttestation, chain: &BeaconChain, - ) -> Result<(u64, SubnetId), Error> { - let expected_subnet_id = SubnetId::compute_subnet_for_attestation::( - attestation, - committees_per_slot, - &chain.spec, - ) - .map_err(BeaconChainError::from)?; - - // If a subnet was specified, ensure that subnet is correct. - if let Some(subnet_id) = subnet_id { - if subnet_id != expected_subnet_id { - return Err(Error::InvalidSubnetId { - received: subnet_id, - expected: expected_subnet_id, - }); - } - }; - - let validator_index = *indexed_attestation - .attesting_indices_first() - .ok_or(Error::NotExactlyOneAggregationBitSet(0))?; + ) -> Result { + let validator_index = attestation.attester_index; /* * The attestation is the first valid attestation received for the participating validator @@ -895,16 +886,16 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { if chain .observed_gossip_attesters .read() - .validator_has_been_observed(attestation.data().target.epoch, validator_index as usize) + .validator_has_been_observed(attestation.data.target.epoch, validator_index as usize) .map_err(BeaconChainError::from)? 
{ return Err(Error::PriorAttestationKnown { validator_index, - epoch: attestation.data().target.epoch, + epoch: attestation.data.target.epoch, }); } - Ok((validator_index, expected_subnet_id)) + Ok(validator_index) } /// Returns `Ok(Self)` if the `attestation` is valid to be (re)published on the gossip @@ -913,11 +904,11 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { /// `subnet_id` is the subnet from which we received this attestation. This function will /// verify that it was received on the correct subnet. pub fn verify( - attestation: &'a Attestation, + attestation: &'a SingleAttestation, subnet_id: Option, chain: &BeaconChain, ) -> Result { - Self::verify_slashable(attestation.to_ref(), subnet_id, chain) + Self::verify_slashable(attestation, subnet_id, chain) .inspect(|verified_unaggregated| { if let Some(slasher) = chain.slasher.as_ref() { slasher.accept_attestation(verified_unaggregated.indexed_attestation.clone()); @@ -928,31 +919,23 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { /// Verify the attestation, producing extra information about whether it might be slashable. 
pub fn verify_slashable( - attestation: AttestationRef<'a, T::EthSpec>, + attestation: &'a SingleAttestation, subnet_id: Option, chain: &BeaconChain, ) -> Result> { use AttestationSlashInfo::*; if let Err(e) = Self::verify_early_checks(attestation, chain) { - return Err(SignatureNotChecked(attestation, e)); + return Err(SignatureNotCheckedSingle(attestation, e)); } - let (indexed_attestation, committees_per_slot) = - match obtain_indexed_attestation_and_committees_per_slot(chain, attestation) { - Ok(x) => x, - Err(e) => { - return Err(SignatureNotChecked(attestation, e)); - } - }; + let fork_name = chain + .spec + .fork_name_at_slot::(attestation.data.slot); - let (validator_index, expected_subnet_id) = match Self::verify_middle_checks( - attestation, - &indexed_attestation, - committees_per_slot, - subnet_id, - chain, - ) { + let indexed_attestation = attestation.to_indexed(fork_name); + + let validator_index = match Self::verify_middle_checks(attestation, chain) { Ok(t) => t, Err(e) => return Err(SignatureNotCheckedIndexed(indexed_attestation, e)), }; @@ -960,7 +943,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { Ok(Self { attestation, indexed_attestation, - subnet_id: expected_subnet_id, + subnet_id, validator_index, }) } @@ -977,10 +960,55 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { /// Run the checks that apply after the signature has been checked. 
fn verify_late_checks( - attestation: AttestationRef, + attestation: &'a SingleAttestation, validator_index: u64, + subnet_id: Option, chain: &BeaconChain, - ) -> Result<(), Error> { + ) -> Result<(Attestation, SubnetId), Error> { + // Check that the attester is a member of the committee + let (committee_opt, committees_per_slot) = chain.with_committee_cache( + attestation.data.target.root, + attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()), + |committee_cache, _| { + let committee_opt = committee_cache + .get_beacon_committee(attestation.data.slot, attestation.committee_index) + .map(|beacon_committee| beacon_committee.committee.to_vec()); + + Ok((committee_opt, committee_cache.committees_per_slot())) + }, + )?; + + let Some(committee) = committee_opt else { + return Err(Error::NoCommitteeForSlotAndIndex { + slot: attestation.data.slot, + index: attestation.committee_index, + }); + }; + + if !committee.contains(&(attestation.attester_index as usize)) { + return Err(Error::AttesterNotInCommittee { + attester_index: attestation.attester_index, + committee_index: attestation.committee_index, + slot: attestation.data.slot, + }); + } + + let expected_subnet_id = SubnetId::compute_subnet_for_single_attestation::( + attestation, + committees_per_slot, + &chain.spec, + ) + .map_err(BeaconChainError::from)?; + + // If a subnet was specified, ensure that subnet is correct. + if let Some(subnet_id) = subnet_id { + if subnet_id != expected_subnet_id { + return Err(Error::InvalidSubnetId { + received: subnet_id, + expected: expected_subnet_id, + }); + } + }; // Now that the attestation has been fully verified, store that we have received a valid // attestation from this validator. 
// @@ -990,20 +1018,28 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { if chain .observed_gossip_attesters .write() - .observe_validator(attestation.data().target.epoch, validator_index as usize) + .observe_validator(attestation.data.target.epoch, validator_index as usize) .map_err(BeaconChainError::from)? { return Err(Error::PriorAttestationKnown { validator_index, - epoch: attestation.data().target.epoch, + epoch: attestation.data.target.epoch, }); } - Ok(()) + + let fork_name = chain + .spec + .fork_name_at_slot::(attestation.data.slot); + + let unaggregated_attestation = + single_attestation_to_attestation(attestation, &committee, fork_name)?; + + Ok((unaggregated_attestation, expected_subnet_id)) } /// Verify the `unaggregated_attestation`. pub fn verify( - unaggregated_attestation: &'a Attestation, + unaggregated_attestation: &'a SingleAttestation, subnet_id: Option, chain: &BeaconChain, ) -> Result { @@ -1054,15 +1090,17 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { CheckAttestationSignature::No => (), }; - if let Err(e) = Self::verify_late_checks(attestation, validator_index, chain) { - return Err(SignatureValid(indexed_attestation, e)); - } + let (unaggregated_attestation, subnet_id) = + match Self::verify_late_checks(attestation, validator_index, subnet_id, chain) { + Ok(a) => a, + Err(e) => return Err(SignatureValid(indexed_attestation, e)), + }; Ok(Self { - attestation, + single_attestation: attestation, + attestation: unaggregated_attestation, indexed_attestation, subnet_id, - validator_index: validator_index as usize, }) } @@ -1071,11 +1109,6 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { self.subnet_id } - /// Returns the wrapped `attestation`. - pub fn attestation(&self) -> AttestationRef { - self.attestation - } - /// Returns the wrapped `indexed_attestation`. 
pub fn indexed_attestation(&self) -> &IndexedAttestation { &self.indexed_attestation @@ -1102,40 +1135,40 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { /// already finalized. fn verify_head_block_is_known( chain: &BeaconChain, - attestation: AttestationRef, + attestation_data: &AttestationData, max_skip_slots: Option, ) -> Result { let block_opt = chain .canonical_head .fork_choice_read_lock() - .get_block(&attestation.data().beacon_block_root) + .get_block(&attestation_data.beacon_block_root) .or_else(|| { chain .early_attester_cache - .get_proto_block(attestation.data().beacon_block_root) + .get_proto_block(attestation_data.beacon_block_root) }); if let Some(block) = block_opt { // Reject any block that exceeds our limit on skipped slots. if let Some(max_skip_slots) = max_skip_slots { - if attestation.data().slot > block.slot + max_skip_slots { + if attestation_data.slot > block.slot + max_skip_slots { return Err(Error::TooManySkippedSlots { head_block_slot: block.slot, - attestation_slot: attestation.data().slot, + attestation_slot: attestation_data.slot, }); } } - if !verify_attestation_is_finalized_checkpoint_or_descendant(attestation.data(), chain) { + if !verify_attestation_is_finalized_checkpoint_or_descendant(attestation_data, chain) { return Err(Error::HeadBlockFinalized { - beacon_block_root: attestation.data().beacon_block_root, + beacon_block_root: attestation_data.beacon_block_root, }); } Ok(block) - } else if chain.is_pre_finalization_block(attestation.data().beacon_block_root)? { + } else if chain.is_pre_finalization_block(attestation_data.beacon_block_root)? { Err(Error::HeadBlockFinalized { - beacon_block_root: attestation.data().beacon_block_root, + beacon_block_root: attestation_data.beacon_block_root, }) } else { // The block is either: @@ -1145,7 +1178,7 @@ fn verify_head_block_is_known( // 2) A post-finalization block that we don't know about yet. 
We'll queue // the attestation until the block becomes available (or we time out). Err(Error::UnknownHeadBlock { - beacon_block_root: attestation.data().beacon_block_root, + beacon_block_root: attestation_data.beacon_block_root, }) } } @@ -1237,11 +1270,11 @@ pub fn verify_attestation_signature( /// `attestation.data.beacon_block_root`. pub fn verify_attestation_target_root( head_block: &ProtoBlock, - attestation: AttestationRef, + attestation_data: &AttestationData, ) -> Result<(), Error> { // Check the attestation target root. let head_block_epoch = head_block.slot.epoch(E::slots_per_epoch()); - let attestation_epoch = attestation.data().slot.epoch(E::slots_per_epoch()); + let attestation_epoch = attestation_data.slot.epoch(E::slots_per_epoch()); if head_block_epoch > attestation_epoch { // The epoch references an invalid head block from a future epoch. // @@ -1254,7 +1287,7 @@ pub fn verify_attestation_target_root( // Reference: // https://github.com/ethereum/eth2.0-specs/pull/2001#issuecomment-699246659 return Err(Error::InvalidTargetRoot { - attestation: attestation.data().target.root, + attestation: attestation_data.target.root, // It is not clear what root we should expect in this case, since the attestation is // fundamentally invalid. expected: None, @@ -1273,9 +1306,9 @@ pub fn verify_attestation_target_root( }; // Reject any attestation with an invalid target root. 
- if target_root != attestation.data().target.root { + if target_root != attestation_data.target.root { return Err(Error::InvalidTargetRoot { - attestation: attestation.data().target.root, + attestation: attestation_data.target.root, expected: Some(target_root), }); } diff --git a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs index 5f856140ba..266279432e 100644 --- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs +++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs @@ -136,7 +136,7 @@ pub fn batch_verify_unaggregated_attestations<'a, T, I>( ) -> Result, Error>>, Error> where T: BeaconChainTypes, - I: Iterator, Option)> + ExactSizeIterator, + I: Iterator)> + ExactSizeIterator, { let mut num_partially_verified = 0; let mut num_failed = 0; diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index ae715afcd0..34a528f212 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -365,11 +365,7 @@ impl AttesterCache { value: AttesterCacheValue, ) { while cache.len() >= MAX_CACHE_LEN { - if let Some(oldest) = cache - .iter() - .map(|(key, _)| *key) - .min_by_key(|key| key.epoch) - { + if let Some(oldest) = cache.keys().copied().min_by_key(|key| key.epoch) { cache.remove(&oldest); } else { break; diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c1d30253a3..de377dab97 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -27,8 +27,6 @@ use crate::data_availability_checker::{ use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; -use crate::eth1_chain::{Eth1Chain, 
Eth1ChainBackend}; -use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; use crate::fetch_blobs::EngineGetBlobsOutput; @@ -58,12 +56,14 @@ use crate::observed_data_sidecars::ObservedDataSidecars; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; use crate::persisted_beacon_chain::PersistedBeaconChain; +use crate::persisted_custody::persist_custody_context; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::sync_committee_verification::{ Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, }; +use crate::validator_custody::CustodyContextSsz; use crate::validator_monitor::{ get_slot_delay_ms, timestamp_now, ValidatorMonitor, HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, @@ -73,7 +73,9 @@ use crate::{ kzg_utils, metrics, AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead, }; -use eth2::types::{EventKind, SseBlobSidecar, SseBlock, SseExtendedPayloadAttributes}; +use eth2::types::{ + EventKind, SseBlobSidecar, SseBlock, SseDataColumnSidecar, SseExtendedPayloadAttributes, +}; use execution_layer::{ BlockProposalContents, BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, @@ -120,7 +122,7 @@ use std::time::Duration; use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator}; use store::{ BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, HotStateSummary, - KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, + KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, 
TaskExecutor}; use tokio_stream::Stream; @@ -139,7 +141,6 @@ type HashBlockTuple = (Hash256, RpcBlock); // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::ZERO; pub const OP_POOL_DB_KEY: Hash256 = Hash256::ZERO; -pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::ZERO; pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::ZERO; /// Defines how old a block can be before it's no longer a candidate for the early attester cache. @@ -308,7 +309,6 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type HotStore: store::ItemStore; type ColdStore: store::ItemStore; type SlotClock: slot_clock::SlotClock; - type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; } @@ -432,8 +432,6 @@ pub struct BeaconChain { /// Maintains a record of which validators we've seen BLS to execution changes for. pub observed_bls_to_execution_changes: Mutex>, - /// Provides information from the Ethereum 1 (PoW) chain. - pub eth1_chain: Option>, /// Interfaces with the execution client. pub execution_layer: Option>, /// Stores information about the canonical head and finalized/justified checkpoints of the @@ -456,8 +454,6 @@ pub struct BeaconChain { pub event_handler: Option>, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: RwLock, - /// A cache of eth1 deposit data at epoch boundaries for deposit finalization - pub eth1_finalization_cache: RwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Arc>, /// Caches a map of `validator_index -> validator_pubkey`. @@ -656,14 +652,19 @@ impl BeaconChain { Ok(()) } - /// Persists `self.eth1_chain` and its caches to disk. - pub fn persist_eth1_cache(&self) -> Result<(), Error> { - let _timer = metrics::start_timer(&metrics::PERSIST_ETH1_CACHE); + /// Persists the custody information to disk. 
+ pub fn persist_custody_context(&self) -> Result<(), Error> { + let custody_context: CustodyContextSsz = self + .data_availability_checker + .custody_context() + .as_ref() + .into(); + debug!(?custody_context, "Persisting custody context to store"); - if let Some(eth1_chain) = self.eth1_chain.as_ref() { - self.store - .put_item(Ð1_CACHE_DB_KEY, ð1_chain.as_ssz_container())?; - } + persist_custody_context::( + self.store.clone(), + custody_context, + )?; Ok(()) } @@ -2053,7 +2054,7 @@ impl BeaconChain { AttestationError, > where - I: Iterator, Option)> + ExactSizeIterator, + I: Iterator)> + ExactSizeIterator, { batch_verify_unaggregated_attestations(attestations, self) } @@ -2065,7 +2066,7 @@ impl BeaconChain { /// aggregation bit set. pub fn verify_unaggregated_attestation_for_gossip<'a>( &self, - unaggregated_attestation: &'a Attestation, + unaggregated_attestation: &'a SingleAttestation, subnet_id: Option, ) -> Result, AttestationError> { metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS); @@ -2081,13 +2082,9 @@ impl BeaconChain { .spec .fork_name_at_slot::(v.attestation().data().slot); if current_fork.electra_enabled() { - // I don't see a situation where this could return None. The upstream unaggregated attestation checks - // should have already verified that this is an attestation with a single committee bit set. - if let Some(single_attestation) = v.single_attestation() { - event_handler.register(EventKind::SingleAttestation(Box::new( - single_attestation, - ))); - } + event_handler.register(EventKind::SingleAttestation(Box::new( + v.single_attestation(), + ))); } } @@ -2377,13 +2374,10 @@ impl BeaconChain { // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. 
- if self.eth1_chain.is_some() { - let (attestation, attesting_indices) = - verified_attestation.into_attestation_and_indices(); - self.op_pool - .insert_attestation(attestation, attesting_indices) - .map_err(Error::from)?; - } + let (attestation, attesting_indices) = verified_attestation.into_attestation_and_indices(); + self.op_pool + .insert_attestation(attestation, attesting_indices) + .map_err(Error::from)?; Ok(()) } @@ -2399,11 +2393,9 @@ impl BeaconChain { // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. - if self.eth1_chain.is_some() { - self.op_pool - .insert_sync_contribution(contribution.contribution()) - .map_err(Error::from)?; - } + self.op_pool + .insert_sync_contribution(contribution.contribution()) + .map_err(Error::from)?; Ok(()) } @@ -2539,9 +2531,7 @@ impl BeaconChain { /// Accept a pre-verified exit and queue it for inclusion in an appropriate block. pub fn import_voluntary_exit(&self, exit: SigVerifiedOp) { - if self.eth1_chain.is_some() { - self.op_pool.insert_voluntary_exit(exit) - } + self.op_pool.insert_voluntary_exit(exit) } /// Verify a proposer slashing before allowing it to propagate on the gossip network. @@ -2571,9 +2561,7 @@ impl BeaconChain { } } - if self.eth1_chain.is_some() { - self.op_pool.insert_proposer_slashing(proposer_slashing) - } + self.op_pool.insert_proposer_slashing(proposer_slashing) } /// Verify an attester slashing before allowing it to propagate on the gossip network. @@ -2612,9 +2600,7 @@ impl BeaconChain { } // Add to the op pool (if we have the ability to propose blocks). - if self.eth1_chain.is_some() { - self.op_pool.insert_attester_slashing(attester_slashing) - } + self.op_pool.insert_attester_slashing(attester_slashing) } /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. 
@@ -2686,12 +2672,8 @@ impl BeaconChain { } } - if self.eth1_chain.is_some() { - self.op_pool - .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) - } else { - false - } + self.op_pool + .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) } /// Attempt to obtain sync committee duties from the head. @@ -2988,7 +2970,6 @@ impl BeaconChain { pub async fn verify_block_for_gossip( self: &Arc, block: Arc>, - custody_columns_count: usize, ) -> Result, BlockError> { let chain = self.clone(); self.task_executor @@ -2998,7 +2979,7 @@ impl BeaconChain { let slot = block.slot(); let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); - match GossipVerifiedBlock::new(block, &chain, custody_columns_count) { + match GossipVerifiedBlock::new(block, &chain) { Ok(verified) => { let commitments_formatted = verified.block.commitments_formatted(); debug!( @@ -3087,6 +3068,11 @@ impl BeaconChain { return Err(BlockError::DuplicateFullyImported(block_root)); } + self.emit_sse_data_column_sidecar_events( + &block_root, + data_columns.iter().map(|column| column.as_data_column()), + ); + let r = self .check_gossip_data_columns_availability_and_import( slot, @@ -3146,7 +3132,7 @@ impl BeaconChain { self: &Arc, slot: Slot, block_root: Hash256, - engine_get_blobs_output: EngineGetBlobsOutput, + engine_get_blobs_output: EngineGetBlobsOutput, ) -> Result { // If this block has already been imported to forkchoice it must have been available, so // we don't need to process its blobs again. @@ -3158,10 +3144,16 @@ impl BeaconChain { return Err(BlockError::DuplicateFullyImported(block_root)); } - // process_engine_blobs is called for both pre and post PeerDAS. However, post PeerDAS - // consumers don't expect the blobs event to fire erratically. 
- if let EngineGetBlobsOutput::Blobs(blobs) = &engine_get_blobs_output { - self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); + match &engine_get_blobs_output { + EngineGetBlobsOutput::Blobs(blobs) => { + self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().map(|b| b.as_blob())); + } + EngineGetBlobsOutput::CustodyColumns(columns) => { + self.emit_sse_data_column_sidecar_events( + &block_root, + columns.iter().map(|column| column.as_data_column()), + ); + } } let r = self @@ -3191,6 +3183,31 @@ impl BeaconChain { } } + fn emit_sse_data_column_sidecar_events<'a, I>( + self: &Arc, + block_root: &Hash256, + data_columns_iter: I, + ) where + I: Iterator>, + { + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_data_column_sidecar_subscribers() { + let imported_data_columns = self + .data_availability_checker + .cached_data_column_indexes(block_root) + .unwrap_or_default(); + let new_data_columns = + data_columns_iter.filter(|b| !imported_data_columns.contains(&b.index)); + + for data_column in new_data_columns { + event_handler.register(EventKind::DataColumnSidecar( + SseDataColumnSidecar::from_data_column_sidecar(data_column), + )); + } + } + } + } + /// Cache the columns in the processing cache, process it, then evict it from the cache if it was /// imported or errors. 
pub async fn process_rpc_custody_columns( @@ -3231,6 +3248,11 @@ impl BeaconChain { } } + self.emit_sse_data_column_sidecar_events( + &block_root, + custody_columns.iter().map(|column| column.as_ref()), + ); + let r = self .check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) .await; @@ -3545,7 +3567,9 @@ impl BeaconChain { if let Some(slasher) = self.slasher.as_ref() { slasher.accept_block_header(blob.signed_block_header()); } - let availability = self.data_availability_checker.put_gossip_blob(blob)?; + let availability = self + .data_availability_checker + .put_gossip_verified_blobs(blob.block_root(), std::iter::once(blob))?; self.process_availability(slot, availability, || Ok(())) .await @@ -3568,21 +3592,21 @@ impl BeaconChain { let availability = self .data_availability_checker - .put_gossip_data_columns(block_root, data_columns)?; + .put_gossip_verified_data_columns(block_root, data_columns)?; self.process_availability(slot, availability, publish_fn) .await } - fn check_blobs_for_slashability( + fn check_blobs_for_slashability<'a>( self: &Arc, block_root: Hash256, - blobs: &FixedBlobSidecarList, + blobs: impl IntoIterator>, ) -> Result<(), BlockError> { let mut slashable_cache = self.observed_slashable.write(); for header in blobs - .iter() - .filter_map(|b| b.as_ref().map(|b| b.signed_block_header.clone())) + .into_iter() + .map(|b| b.signed_block_header.clone()) .unique() { if verify_header_signature::(self, &header).is_ok() { @@ -3609,7 +3633,7 @@ impl BeaconChain { block_root: Hash256, blobs: FixedBlobSidecarList, ) -> Result { - self.check_blobs_for_slashability(block_root, &blobs)?; + self.check_blobs_for_slashability(block_root, blobs.iter().flatten().map(Arc::as_ref))?; let availability = self .data_availability_checker .put_rpc_blobs(block_root, blobs)?; @@ -3622,18 +3646,21 @@ impl BeaconChain { self: &Arc, slot: Slot, block_root: Hash256, - engine_get_blobs_output: EngineGetBlobsOutput, + engine_get_blobs_output: 
EngineGetBlobsOutput, ) -> Result { let availability = match engine_get_blobs_output { EngineGetBlobsOutput::Blobs(blobs) => { - self.check_blobs_for_slashability(block_root, &blobs)?; + self.check_blobs_for_slashability(block_root, blobs.iter().map(|b| b.as_blob()))?; self.data_availability_checker - .put_engine_blobs(block_root, blobs)? + .put_gossip_verified_blobs(block_root, blobs)? } EngineGetBlobsOutput::CustodyColumns(data_columns) => { - self.check_columns_for_slashability(block_root, &data_columns)?; + self.check_columns_for_slashability( + block_root, + data_columns.iter().map(|c| c.as_data_column()), + )?; self.data_availability_checker - .put_engine_data_columns(block_root, data_columns)? + .put_kzg_verified_custody_data_columns(block_root, data_columns)? } }; @@ -3649,7 +3676,10 @@ impl BeaconChain { block_root: Hash256, custody_columns: DataColumnSidecarList, ) -> Result { - self.check_columns_for_slashability(block_root, &custody_columns)?; + self.check_columns_for_slashability( + block_root, + custody_columns.iter().map(|c| c.as_ref()), + )?; // This slot value is purely informative for the consumers of // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. @@ -3661,16 +3691,21 @@ impl BeaconChain { .await } - fn check_columns_for_slashability( + fn check_columns_for_slashability<'a>( self: &Arc, block_root: Hash256, - custody_columns: &DataColumnSidecarList, + custody_columns: impl IntoIterator>, ) -> Result<(), BlockError> { let mut slashable_cache = self.observed_slashable.write(); - // Assumes all items in custody_columns are for the same block_root - if let Some(column) = custody_columns.first() { - let header = &column.signed_block_header; - if verify_header_signature::(self, header).is_ok() { + // Process all unique block headers - previous logic assumed all headers were identical and + // only processed the first one. However, we should not make assumptions about data received + // from RPC. 
+ for header in custody_columns + .into_iter() + .map(|c| c.signed_block_header.clone()) + .unique() + { + if verify_header_signature::(self, &header).is_ok() { slashable_cache .observe_slashable( header.message.slot, @@ -3679,7 +3714,7 @@ impl BeaconChain { ) .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; if let Some(slasher) = self.slasher.as_ref() { - slasher.accept_block_header(header.clone()); + slasher.accept_block_header(header); } } } @@ -3722,7 +3757,6 @@ impl BeaconChain { block_root, state, parent_block, - parent_eth1_finalization_data, consensus_context, } = import_data; @@ -3748,7 +3782,6 @@ impl BeaconChain { state, payload_verification_outcome.payload_verification_status, parent_block, - parent_eth1_finalization_data, consensus_context, ) }, @@ -3785,7 +3818,6 @@ impl BeaconChain { mut state: BeaconState, payload_verification_status: PayloadVerificationStatus, parent_block: SignedBlindedBeaconBlock, - parent_eth1_finalization_data: Eth1FinalizationData, mut consensus_context: ConsensusContext, ) -> Result { // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- @@ -3973,8 +4005,6 @@ impl BeaconChain { ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); - let txn_lock = self.store.hot_db.begin_rw_transaction(); - if let Err(e) = self.store.do_atomically_with_block_and_blobs_cache(ops) { error!( msg = "Restoring fork choice from disk", @@ -3986,7 +4016,6 @@ impl BeaconChain { .err() .unwrap_or(e.into())); } - drop(txn_lock); // The fork choice write-lock is dropped *after* the on-disk database has been updated. // This prevents inconsistency between the two at the expense of concurrency. @@ -3996,12 +4025,6 @@ impl BeaconChain { // about it. 
let block_time_imported = timestamp_now(); - let current_eth1_finalization_data = Eth1FinalizationData { - eth1_data: state.eth1_data().clone(), - eth1_deposit_index: state.eth1_deposit_index(), - }; - let current_finalized_checkpoint = state.finalized_checkpoint(); - // compute state proofs for light client updates before inserting the state into the // snapshot cache. if self.config.enable_light_client_server { @@ -4020,17 +4043,6 @@ impl BeaconChain { metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); - // Update the deposit contract cache. - self.import_block_update_deposit_contract_finalization( - block, - block_root, - current_epoch, - current_finalized_checkpoint, - current_eth1_finalization_data, - parent_eth1_finalization_data, - parent_block.slot(), - ); - // Inform the unknown block cache, in case it was waiting on this block. self.pre_finalization_block_cache .block_processed(block_root); @@ -4427,65 +4439,6 @@ impl BeaconChain { Ok(()) } - #[allow(clippy::too_many_arguments)] - fn import_block_update_deposit_contract_finalization( - &self, - block: BeaconBlockRef, - block_root: Hash256, - current_epoch: Epoch, - current_finalized_checkpoint: Checkpoint, - current_eth1_finalization_data: Eth1FinalizationData, - parent_eth1_finalization_data: Eth1FinalizationData, - parent_block_slot: Slot, - ) { - // Do not write to eth1 finalization cache for blocks older than 5 epochs. 
- if block.epoch() + 5 < current_epoch { - return; - } - - let parent_block_epoch = parent_block_slot.epoch(T::EthSpec::slots_per_epoch()); - if parent_block_epoch < current_epoch { - // we've crossed epoch boundary, store Eth1FinalizationData - let (checkpoint, eth1_finalization_data) = - if block.slot() % T::EthSpec::slots_per_epoch() == 0 { - // current block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: block_root, - }, - current_eth1_finalization_data, - ) - } else { - // parent block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: block.parent_root(), - }, - parent_eth1_finalization_data, - ) - }; - - let finalized_eth1_data = { - let mut cache = self.eth1_finalization_cache.write(); - cache.insert(checkpoint, eth1_finalization_data); - cache.finalize(¤t_finalized_checkpoint) - }; - if let Some(finalized_eth1_data) = finalized_eth1_data { - if let Some(eth1_chain) = self.eth1_chain.as_ref() { - let finalized_deposit_count = finalized_eth1_data.deposit_count; - eth1_chain.finalize_eth1_data(finalized_eth1_data); - debug!( - epoch = %current_finalized_checkpoint.epoch, - deposit_count = %finalized_deposit_count, - "called eth1_chain.finalize_eth1_data()" - ); - } - } - } - } - /// If configured, wait for the fork choice run at the start of the slot to complete. fn wait_for_fork_choice_before_block_production( self: &Arc, @@ -5223,11 +5176,6 @@ impl BeaconChain { builder_boost_factor: Option, block_production_version: BlockProductionVersion, ) -> Result, BlockProductionError> { - let eth1_chain = self - .eth1_chain - .as_ref() - .ok_or(BlockProductionError::NoEth1ChainConnection)?; - // It is invalid to try to produce a block using a state from a future slot. 
if state.slot() > produce_at_slot { return Err(BlockProductionError::StateSlotTooHigh { @@ -5292,9 +5240,9 @@ impl BeaconChain { let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) = self.op_pool.get_slashings_and_exits(&state, &self.spec); - let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; + let eth1_data = state.eth1_data().clone(); - let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; + let deposits = vec![]; let bls_to_execution_changes = self .op_pool @@ -6781,13 +6729,22 @@ impl BeaconChain { #[allow(clippy::type_complexity)] pub fn chain_dump( &self, + ) -> Result>>, Error> { + self.chain_dump_from_slot(Slot::new(0)) + } + + /// As for `chain_dump` but dumping only the portion of the chain newer than `from_slot`. + #[allow(clippy::type_complexity)] + pub fn chain_dump_from_slot( + &self, + from_slot: Slot, ) -> Result>>, Error> { let mut dump = vec![]; let mut prev_block_root = None; let mut prev_beacon_state = None; - for res in self.forwards_iter_block_roots(Slot::new(0))? { + for res in self.forwards_iter_block_roots(from_slot)? { let (beacon_block_root, _) = res?; // Do not include snapshots at skipped slots. 
@@ -7123,7 +7080,7 @@ impl BeaconChain { &self, block_root: Hash256, block_data: AvailableBlockData, - ) -> Result>, String> { + ) -> Result>, String> { match block_data { AvailableBlockData::NoData => Ok(None), AvailableBlockData::Blobs(blobs) => { @@ -7176,7 +7133,7 @@ impl Drop for BeaconChain { let drop = || -> Result<(), Error> { self.persist_fork_choice()?; self.persist_op_pool()?; - self.persist_eth1_cache() + self.persist_custody_context() }; if let Err(e) = drop() { diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 56b13b0b77..12970214c6 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -181,7 +181,7 @@ pub fn compute_proposer_duties_from_head( ensure_state_is_in_epoch(&mut state, head_state_root, request_epoch, &chain.spec)?; let indices = state - .get_beacon_proposer_indices(&chain.spec) + .get_beacon_proposer_indices(request_epoch, &chain.spec) .map_err(BeaconChainError::from)?; let dependent_root = state diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 6fe710f41a..a78224fb70 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -166,6 +166,16 @@ pub struct GossipVerifiedBlob, } +impl Clone for GossipVerifiedBlob { + fn clone(&self) -> Self { + Self { + block_root: self.block_root, + blob: self.blob.clone(), + _phantom: PhantomData, + } + } +} + impl GossipVerifiedBlob { pub fn new( blob: Arc>, @@ -335,21 +345,9 @@ impl KzgVerifiedBlobList { } /// Create a `KzgVerifiedBlobList` from `blobs` that are already KZG verified. - /// - /// This should be used with caution, as used incorrectly it could result in KZG verification - /// being skipped and invalid blobs being deemed valid. 
- pub fn from_verified>>>( - blobs: I, - seen_timestamp: Duration, - ) -> Self { + pub fn from_verified>>(blobs: I) -> Self { Self { - verified_blobs: blobs - .into_iter() - .map(|blob| KzgVerifiedBlob { - blob, - seen_timestamp, - }) - .collect(), + verified_blobs: blobs.into_iter().collect(), } } } @@ -525,7 +523,8 @@ pub fn validate_blob_sidecar_for_gossip { pub block_root: Hash256, parent: Option>, consensus_context: ConsensusContext, - custody_columns_count: usize, } /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit @@ -721,7 +729,6 @@ pub trait IntoGossipVerifiedBlock: Sized { fn into_gossip_verified_block( self, chain: &BeaconChain, - custody_columns_count: usize, ) -> Result, BlockError>; fn inner_block(&self) -> Arc>; } @@ -730,7 +737,6 @@ impl IntoGossipVerifiedBlock for GossipVerifiedBlock fn into_gossip_verified_block( self, _chain: &BeaconChain, - _custody_columns_count: usize, ) -> Result, BlockError> { Ok(self) } @@ -743,9 +749,8 @@ impl IntoGossipVerifiedBlock for Arc, - custody_columns_count: usize, ) -> Result, BlockError> { - GossipVerifiedBlock::new(self, chain, custody_columns_count) + GossipVerifiedBlock::new(self, chain) } fn inner_block(&self) -> Arc> { @@ -821,7 +826,6 @@ impl GossipVerifiedBlock { pub fn new( block: Arc>, chain: &BeaconChain, - custody_columns_count: usize, ) -> Result { // If the block is valid for gossip we don't supply it to the slasher here because // we assume it will be transformed into a fully verified block. We *do* need to supply @@ -831,14 +835,12 @@ impl GossipVerifiedBlock { // The `SignedBeaconBlock` and `SignedBeaconBlockHeader` have the same canonical root, // but it's way quicker to calculate root of the header since the hash of the tree rooted // at `BeaconBlockBody` is already computed in the header. 
- Self::new_without_slasher_checks(block, &header, chain, custody_columns_count).map_err( - |e| { - process_block_slash_info::<_, BlockError>( - chain, - BlockSlashInfo::from_early_error_block(header, e), - ) - }, - ) + Self::new_without_slasher_checks(block, &header, chain).map_err(|e| { + process_block_slash_info::<_, BlockError>( + chain, + BlockSlashInfo::from_early_error_block(header, e), + ) + }) } /// As for new, but doesn't pass the block to the slasher. @@ -846,7 +848,6 @@ impl GossipVerifiedBlock { block: Arc>, block_header: &SignedBeaconBlockHeader, chain: &BeaconChain, - custody_columns_count: usize, ) -> Result { // Ensure the block is the correct structure for the fork at `block.slot()`. block @@ -865,6 +866,21 @@ impl GossipVerifiedBlock { }); } + // Do not gossip blocks that claim to contain more blobs than the max allowed + // at the given block epoch. + if let Ok(commitments) = block.message().body().blob_kzg_commitments() { + let max_blobs_at_epoch = chain + .spec + .max_blobs_per_block(block.slot().epoch(T::EthSpec::slots_per_epoch())) + as usize; + if commitments.len() > max_blobs_at_epoch { + return Err(BlockError::InvalidBlobCount { + max_blobs_at_epoch, + block: commitments.len(), + }); + } + } + let block_root = get_block_header_root(block_header); // Do not gossip a block from a finalized slot. 
@@ -962,7 +978,8 @@ impl GossipVerifiedBlock { &chain.spec, )?; - let proposers = state.get_beacon_proposer_indices(&chain.spec)?; + let epoch = state.current_epoch(); + let proposers = state.get_beacon_proposer_indices(epoch, &chain.spec)?; let proposer_index = *proposers .get(block.slot().as_usize() % T::EthSpec::slots_per_epoch() as usize) .ok_or_else(|| BeaconChainError::NoProposerForSlot(block.slot()))?; @@ -1053,7 +1070,6 @@ impl GossipVerifiedBlock { block_root, parent, consensus_context, - custody_columns_count, }) } @@ -1201,7 +1217,6 @@ impl SignatureVerifiedBlock { block: MaybeAvailableBlock::AvailabilityPending { block_root: from.block_root, block, - custody_columns_count: from.custody_columns_count, }, block_root: from.block_root, parent: Some(parent), @@ -1451,11 +1466,6 @@ impl ExecutionPendingBlock { .into()); } - let parent_eth1_finalization_data = Eth1FinalizationData { - eth1_data: state.eth1_data().clone(), - eth1_deposit_index: state.eth1_deposit_index(), - }; - // Transition the parent state to the block slot. // // It is important to note that we're using a "pre-state" here, one that has potentially @@ -1476,28 +1486,19 @@ impl ExecutionPendingBlock { // processing, but we get early access to it. let state_root = state.update_tree_hash_cache()?; - // Store the state immediately. - let txn_lock = chain.store.hot_db.begin_rw_transaction(); + // Store the state immediately. States are ONLY deleted on finalization pruning, so + // we won't have race conditions where we should have written a state and didn't. let state_already_exists = chain.store.load_hot_state_summary(&state_root)?.is_some(); - let state_batch = if state_already_exists { + if state_already_exists { // If the state exists, we do not need to re-write it. 
- vec![] } else { - vec![if state.slot() % T::EthSpec::slots_per_epoch() == 0 { - StoreOp::PutState(state_root, &state) - } else { - StoreOp::PutStateSummary( - state_root, - HotStateSummary::new(&state_root, &state)?, - ) - }] + // Recycle store codepath to create a state summary and store the state / diff + let mut ops = vec![]; + chain.store.store_hot_state(&state_root, &state, &mut ops)?; + chain.store.hot_db.do_atomically(ops)?; }; - chain - .store - .do_atomically_with_block_and_blobs_cache(state_batch)?; - drop(txn_lock); state_root }; @@ -1664,7 +1665,6 @@ impl ExecutionPendingBlock { block_root, state, parent_block: parent.beacon_block, - parent_eth1_finalization_data, consensus_context, }, payload_verification_handle, @@ -2067,7 +2067,7 @@ pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobEr /// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`. pub fn get_validator_pubkey_cache( chain: &BeaconChain, -) -> Result>, BeaconChainError> { +) -> Result>, BeaconChainError> { Ok(chain.validator_pubkey_cache.read()) } diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index dab54dc823..5917e6f6be 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -1,7 +1,6 @@ use crate::data_availability_checker::AvailabilityCheckError; pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; -use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::{get_block_root, PayloadVerificationOutcome}; use derivative::Derivative; use state_processing::ConsensusContext; @@ -31,7 +30,6 @@ use types::{ pub struct RpcBlock { block_root: Hash256, block: RpcBlockInner, - custody_columns_count: usize, } impl Debug for RpcBlock { @@ -45,10 +43,6 @@ impl RpcBlock { self.block_root } 
- pub fn custody_columns_count(&self) -> usize { - self.custody_columns_count - } - pub fn as_block(&self) -> &SignedBeaconBlock { match &self.block { RpcBlockInner::Block(block) => block, @@ -103,14 +97,12 @@ impl RpcBlock { pub fn new_without_blobs( block_root: Option, block: Arc>, - custody_columns_count: usize, ) -> Self { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); Self { block_root, block: RpcBlockInner::Block(block), - custody_columns_count, } } @@ -152,8 +144,6 @@ impl RpcBlock { Ok(Self { block_root, block: inner, - // Block is before PeerDAS - custody_columns_count: 0, }) } @@ -161,7 +151,6 @@ impl RpcBlock { block_root: Option, block: Arc>, custody_columns: Vec>, - custody_columns_count: usize, spec: &ChainSpec, ) -> Result { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); @@ -182,7 +171,6 @@ impl RpcBlock { Ok(Self { block_root, block: inner, - custody_columns_count, }) } @@ -250,12 +238,10 @@ impl ExecutedBlock { MaybeAvailableBlock::AvailabilityPending { block_root: _, block: pending_block, - custody_columns_count, } => Self::AvailabilityPending(AvailabilityPendingExecutedBlock::new( pending_block, import_data, payload_verification_outcome, - custody_columns_count, )), } } @@ -321,7 +307,6 @@ pub struct AvailabilityPendingExecutedBlock { pub block: Arc>, pub import_data: BlockImportData, pub payload_verification_outcome: PayloadVerificationOutcome, - pub custody_columns_count: usize, } impl AvailabilityPendingExecutedBlock { @@ -329,13 +314,11 @@ impl AvailabilityPendingExecutedBlock { block: Arc>, import_data: BlockImportData, payload_verification_outcome: PayloadVerificationOutcome, - custody_columns_count: usize, ) -> Self { Self { block, import_data, payload_verification_outcome, - custody_columns_count, } } @@ -357,7 +340,6 @@ pub struct BlockImportData { pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, - pub parent_eth1_finalization_data: 
Eth1FinalizationData, pub consensus_context: ConsensusContext, } @@ -371,10 +353,6 @@ impl BlockImportData { block_root, state, parent_block, - parent_eth1_finalization_data: Eth1FinalizationData { - eth1_data: <_>::default(), - eth1_deposit_index: 0, - }, consensus_context: ConsensusContext::new(Slot::new(0)), } } @@ -387,7 +365,7 @@ pub trait AsBlock { fn parent_root(&self) -> Hash256; fn state_root(&self) -> Hash256; fn signed_block_header(&self) -> SignedBeaconBlockHeader; - fn message(&self) -> BeaconBlockRef; + fn message(&self) -> BeaconBlockRef<'_, E>; fn as_block(&self) -> &SignedBeaconBlock; fn block_cloned(&self) -> Arc>; fn canonical_root(&self) -> Hash256; @@ -414,7 +392,7 @@ impl AsBlock for Arc> { SignedBeaconBlock::signed_block_header(self) } - fn message(&self) -> BeaconBlockRef { + fn message(&self) -> BeaconBlockRef<'_, E> { SignedBeaconBlock::message(self) } @@ -447,7 +425,7 @@ impl AsBlock for MaybeAvailableBlock { fn signed_block_header(&self) -> SignedBeaconBlockHeader { self.as_block().signed_block_header() } - fn message(&self) -> BeaconBlockRef { + fn message(&self) -> BeaconBlockRef<'_, E> { self.as_block().message() } fn as_block(&self) -> &SignedBeaconBlock { @@ -488,7 +466,7 @@ impl AsBlock for AvailableBlock { self.block().signed_block_header() } - fn message(&self) -> BeaconBlockRef { + fn message(&self) -> BeaconBlockRef<'_, E> { self.block().message() } @@ -521,7 +499,7 @@ impl AsBlock for RpcBlock { fn signed_block_header(&self) -> SignedBeaconBlockHeader { self.as_block().signed_block_header() } - fn message(&self) -> BeaconBlockRef { + fn message(&self) -> BeaconBlockRef<'_, E> { self.as_block().message() } fn as_block(&self) -> &SignedBeaconBlock { diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 812dcbeda7..ce4264d550 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,10 +1,8 @@ use crate::beacon_chain::{ - CanonicalHead, 
LightClientProducerEvent, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY, + CanonicalHead, LightClientProducerEvent, BEACON_CHAIN_DB_KEY, OP_POOL_DB_KEY, }; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::data_availability_checker::DataAvailabilityChecker; -use crate::eth1_chain::{CachingEth1Backend, SszEth1}; -use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; @@ -13,15 +11,15 @@ use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::observed_data_sidecars::ObservedDataSidecars; use crate::persisted_beacon_chain::PersistedBeaconChain; +use crate::persisted_custody::load_custody_context; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::ChainConfig; +use crate::CustodyContext; use crate::{ - BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, Eth1Chain, - Eth1ChainBackend, ServerSentEventHandler, + BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, ServerSentEventHandler, }; -use eth1::Config as Eth1Config; use execution_layer::ExecutionLayer; use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; @@ -42,29 +40,27 @@ use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use tracing::{debug, error, info}; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, DataColumnSidecarList, Epoch, - EthSpec, FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, 
DataColumnSidecarList, Epoch, EthSpec, + FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing /// functionality and only exists to satisfy the type system. -pub struct Witness( - PhantomData<(TSlotClock, TEth1Backend, E, THotStore, TColdStore)>, +pub struct Witness( + PhantomData<(TSlotClock, E, THotStore, TColdStore)>, ); -impl BeaconChainTypes - for Witness +impl BeaconChainTypes + for Witness where THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, TSlotClock: SlotClock + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, { type HotStore = THotStore; type ColdStore = TColdStore; type SlotClock = TSlotClock; - type Eth1Chain = TEth1Backend; type EthSpec = E; } @@ -88,7 +84,6 @@ pub struct BeaconChainBuilder { ForkChoice, T::EthSpec>, >, op_pool: Option>, - eth1_chain: Option>, execution_layer: Option>, event_handler: Option>, slot_clock: Option, @@ -109,13 +104,12 @@ pub struct BeaconChainBuilder { rng: Option>, } -impl - BeaconChainBuilder> +impl + BeaconChainBuilder> where THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, TSlotClock: SlotClock + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, { /// Returns a new builder. @@ -131,7 +125,6 @@ where genesis_state_root: None, fork_choice: None, op_pool: None, - eth1_chain: None, execution_layer: None, event_handler: None, slot_clock: None, @@ -224,18 +217,6 @@ where self } - /// Attempt to load an existing eth1 cache from the builder's `Store`. - pub fn get_persisted_eth1_backend(&self) -> Result, String> { - let store = self - .store - .clone() - .ok_or("get_persisted_eth1_backend requires a store.")?; - - store - .get_item::(Ð1_CACHE_DB_KEY) - .map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e)) - } - /// Returns true if `self.store` contains a persisted beacon chain. 
pub fn store_contains_beacon_chain(&self) -> Result { let store = self @@ -268,16 +249,15 @@ where .to_string() })?; - let fork_choice = - BeaconChain::>::load_fork_choice( - store.clone(), - ResetPayloadStatuses::always_reset_conditionally( - self.chain_config.always_reset_payload_statuses, - ), - &self.spec, - ) - .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? - .ok_or("Fork choice not found in store")?; + let fork_choice = BeaconChain::>::load_fork_choice( + store.clone(), + ResetPayloadStatuses::always_reset_conditionally( + self.chain_config.always_reset_payload_statuses, + ), + &self.spec, + ) + .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? + .ok_or("Fork choice not found in store")?; let genesis_block = store .get_blinded_block(&chain.genesis_block_root) @@ -380,21 +360,29 @@ where } /// Starts a new chain from a genesis state. - pub fn genesis_state(mut self, beacon_state: BeaconState) -> Result { + pub fn genesis_state(mut self, mut beacon_state: BeaconState) -> Result { let store = self.store.clone().ok_or("genesis_state requires a store")?; + // Initialize anchor info before attempting to write the genesis state. + // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent + // historic states from being retained (unless `--reconstruct-historic-states` is set). + let retain_historic_states = self.chain_config.reconstruct_historic_states; + let genesis_beacon_block = genesis_block(&mut beacon_state, &self.spec)?; + self.pending_io_batch.push( + store + .init_anchor_info( + genesis_beacon_block.parent_root(), + genesis_beacon_block.slot(), + Slot::new(0), + retain_historic_states, + ) + .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?, + ); + let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?; self = updated_builder; // Stage the database's metadata fields for atomic storage when `build` is called. 
- // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent - // historic states from being retained (unless `--reconstruct-historic-states` is set). - let retain_historic_states = self.chain_config.reconstruct_historic_states; - self.pending_io_batch.push( - store - .init_anchor_info(genesis.beacon_block.message(), retain_historic_states) - .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?, - ); self.pending_io_batch.push( store .init_blob_info(genesis.beacon_block.slot()) @@ -486,27 +474,46 @@ where // Verify that blobs (if provided) match the block. if let Some(blobs) = &weak_subj_blobs { - let commitments = weak_subj_block - .message() - .body() - .blob_kzg_commitments() - .map_err(|e| format!("Blobs provided but block does not reference them: {e:?}"))?; - if blobs.len() != commitments.len() { - return Err(format!( - "Wrong number of blobs, expected: {}, got: {}", - commitments.len(), - blobs.len() - )); - } - if commitments - .iter() - .zip(blobs.iter()) - .any(|(commitment, blob)| *commitment != blob.kzg_commitment) - { - return Err("Checkpoint blob does not match block commitment".into()); + let fulu_enabled = weak_subj_block.fork_name_unchecked().fulu_enabled(); + if fulu_enabled && blobs.is_empty() { + // Blobs expected for this block, but the checkpoint server is not able to serve them. + // This is expected from Fulu, as only supernodes are able to serve blobs. + // We can consider using backfill to retrieve the data columns from the p2p network, + // but we can ignore this fow now until we have validator custody backfill + // implemented as we'll likely be able to reuse the logic. 
+ // https://github.com/sigp/lighthouse/issues/6837 + } else { + let commitments = weak_subj_block + .message() + .body() + .blob_kzg_commitments() + .map_err(|e| { + format!("Blobs provided but block does not reference them: {e:?}") + })?; + if blobs.len() != commitments.len() { + return Err(format!( + "Wrong number of blobs, expected: {}, got: {}", + commitments.len(), + blobs.len() + )); + } + if commitments + .iter() + .zip(blobs.iter()) + .any(|(commitment, blob)| *commitment != blob.kzg_commitment) + { + return Err("Checkpoint blob does not match block commitment".into()); + } } } + debug!( + slot = %weak_subj_slot, + state_root = ?weak_subj_state_root, + block_root = ?weak_subj_block_root, + "Storing split from weak subjectivity state" + ); + // Set the store's split point *before* storing genesis so that genesis is stored // immediately in the freezer DB. store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root); @@ -527,6 +534,26 @@ where .cold_db .do_atomically(block_root_batch) .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; + debug!( + from = %weak_subj_block.slot(), + to_excl = %weak_subj_state.slot(), + block_root = ?weak_subj_block_root, + "Stored frozen block roots at skipped slots" + ); + + // Write the anchor to memory before calling `put_state` otherwise hot hdiff can't store + // states that do not align with the `start_slot` grid. + let retain_historic_states = self.chain_config.reconstruct_historic_states; + self.pending_io_batch.push( + store + .init_anchor_info( + weak_subj_block.parent_root(), + weak_subj_block.slot(), + weak_subj_slot, + retain_historic_states, + ) + .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, + ); // Write the state, block and blobs non-atomically, it doesn't matter if they're forgotten // about on a crash restart. 
@@ -537,6 +564,8 @@ where weak_subj_state.clone(), ) .map_err(|e| format!("Failed to set checkpoint state as finalized state: {:?}", e))?; + // Note: post hot hdiff must update the anchor info before attempting to put_state otherwise + // the write will fail if the weak_subj_slot is not aligned with the snapshot moduli. store .put_state(&weak_subj_state_root, &weak_subj_state) .map_err(|e| format!("Failed to store weak subjectivity state: {e:?}"))?; @@ -566,13 +595,7 @@ where // Stage the database's metadata fields for atomic storage when `build` is called. // This prevents the database from restarting in an inconsistent state if the anchor // info or split point is written before the `PersistedBeaconChain`. - let retain_historic_states = self.chain_config.reconstruct_historic_states; self.pending_io_batch.push(store.store_split_in_batch()); - self.pending_io_batch.push( - store - .init_anchor_info(weak_subj_block.message(), retain_historic_states) - .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, - ); self.pending_io_batch.push( store .init_blob_info(weak_subj_block.slot()) @@ -584,13 +607,6 @@ where .map_err(|e| format!("Failed to initialize data column info: {:?}", e))?, ); - // Store pruning checkpoint to prevent attempting to prune before the anchor state. - self.pending_io_batch - .push(store.pruning_checkpoint_store_op(Checkpoint { - root: weak_subj_block_root, - epoch: weak_subj_state.slot().epoch(E::slots_per_epoch()), - })); - let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, beacon_block: Arc::new(weak_subj_block), @@ -615,12 +631,6 @@ where Ok(self.empty_op_pool()) } - /// Sets the `BeaconChain` eth1 backend. - pub fn eth1_backend(mut self, backend: Option) -> Self { - self.eth1_chain = backend.map(Eth1Chain::new); - self - } - /// Sets the `BeaconChain` execution layer. 
pub fn execution_layer(mut self, execution_layer: Option>) -> Self { self.execution_layer = execution_layer; @@ -711,8 +721,7 @@ where #[allow(clippy::type_complexity)] // I think there's nothing to be gained here from a type alias. pub fn build( mut self, - ) -> Result>, String> - { + ) -> Result>, String> { let slot_clock = self .slot_clock .ok_or("Cannot build without a slot_clock.")?; @@ -868,12 +877,12 @@ where // This *must* be stored before constructing the `BeaconChain`, so that its `Drop` instance // doesn't write a `PersistedBeaconChain` without the rest of the batch. self.pending_io_batch.push(BeaconChain::< - Witness, + Witness, >::persist_head_in_batch_standalone( genesis_block_root )); self.pending_io_batch.push(BeaconChain::< - Witness, + Witness, >::persist_fork_choice_in_batch_standalone( &fork_choice )); @@ -914,6 +923,20 @@ where } }; + // Load the persisted custody context from the db and initialize + // the context for this run + let custody_context = if let Some(custody) = + load_custody_context::(store.clone()) + { + Arc::new(CustodyContext::new_from_persisted_custody_context( + custody, + self.import_all_data_columns, + )) + } else { + Arc::new(CustodyContext::new(self.import_all_data_columns)) + }; + debug!(?custody_context, "Loading persisted custody context"); + let beacon_chain = BeaconChain { spec: self.spec.clone(), config: self.chain_config, @@ -951,7 +974,6 @@ where observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), observed_bls_to_execution_changes: <_>::default(), - eth1_chain: self.eth1_chain, execution_layer: self.execution_layer.clone(), genesis_validators_root, genesis_time, @@ -965,7 +987,6 @@ where shuffling_cache_size, head_shuffling_ids, )), - eth1_finalization_cache: RwLock::new(Eth1FinalizationCache::default()), beacon_proposer_cache, block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), @@ -987,8 +1008,14 @@ where validator_monitor: 
RwLock::new(validator_monitor), genesis_backfill_slot, data_availability_checker: Arc::new( - DataAvailabilityChecker::new(slot_clock, self.kzg.clone(), store, self.spec) - .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, + DataAvailabilityChecker::new( + slot_clock, + self.kzg.clone(), + store, + custody_context, + self.spec, + ) + .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), kzg: self.kzg.clone(), rng: Arc::new(Mutex::new(rng)), @@ -1064,35 +1091,11 @@ where } } -impl - BeaconChainBuilder, E, THotStore, TColdStore>> +impl + BeaconChainBuilder> where THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, - TSlotClock: SlotClock + 'static, - E: EthSpec + 'static, -{ - /// Do not use any eth1 backend. The client will not be able to produce beacon blocks. - pub fn no_eth1_backend(self) -> Self { - self.eth1_backend(None) - } - - /// Sets the `BeaconChain` eth1 back-end to produce predictably junk data when producing blocks. - pub fn dummy_eth1_backend(mut self) -> Result { - let backend = CachingEth1Backend::new(Eth1Config::default(), self.spec.clone())?; - - self.eth1_chain = Some(Eth1Chain::new_dummy(backend)); - - Ok(self) - } -} - -impl - BeaconChainBuilder> -where - THotStore: ItemStore + 'static, - TColdStore: ItemStore + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, { /// Sets the `BeaconChain` slot clock to `TestingSlotClock`. 
@@ -1240,8 +1243,6 @@ mod test { .task_executor(runtime.task_executor.clone()) .genesis_state(genesis_state) .expect("should build state using recent genesis") - .dummy_eth1_backend() - .expect("should build the dummy eth1 backend") .testing_slot_clock(Duration::from_secs(1)) .expect("should configure testing slot clock") .shutdown_sender(shutdown_tx) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index a6f5179fdc..f96b59aec4 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -73,11 +73,11 @@ impl CanonicalHeadRwLock { Self::from(RwLock::new(item)) } - fn read(&self) -> RwLockReadGuard { + fn read(&self) -> RwLockReadGuard<'_, T> { self.0.read() } - fn write(&self) -> RwLockWriteGuard { + fn write(&self) -> RwLockWriteGuard<'_, T> { self.0.write() } } @@ -369,7 +369,7 @@ impl CanonicalHead { /// /// This function is **not safe** to be public. See the module-level documentation for more /// information about protecting from deadlocks. - fn cached_head_read_lock(&self) -> RwLockReadGuard> { + fn cached_head_read_lock(&self) -> RwLockReadGuard<'_, CachedHead> { self.cached_head.read() } @@ -377,18 +377,18 @@ impl CanonicalHead { /// /// This function is **not safe** to be public. See the module-level documentation for more /// information about protecting from deadlocks. - fn cached_head_write_lock(&self) -> RwLockWriteGuard> { + fn cached_head_write_lock(&self) -> RwLockWriteGuard<'_, CachedHead> { self.cached_head.write() } /// Access a read-lock for fork choice. - pub fn fork_choice_read_lock(&self) -> RwLockReadGuard> { + pub fn fork_choice_read_lock(&self) -> RwLockReadGuard<'_, BeaconForkChoice> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_READ_LOCK_AQUIRE_TIMES); self.fork_choice.read() } /// Access a write-lock for fork choice. 
- pub fn fork_choice_write_lock(&self) -> RwLockWriteGuard> { + pub fn fork_choice_write_lock(&self) -> RwLockWriteGuard<'_, BeaconForkChoice> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_WRITE_LOCK_AQUIRE_TIMES); self.fork_choice.write() } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 6f292f3551..1bc95c22ac 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -5,7 +5,7 @@ use crate::block_verification_types::{ use crate::data_availability_checker::overflow_lru_cache::{ DataAvailabilityCheckerInner, ReconstructColumnsDecision, }; -use crate::{metrics, BeaconChain, BeaconChainTypes, BeaconStore}; +use crate::{metrics, BeaconChain, BeaconChainTypes, BeaconStore, CustodyContext}; use kzg::Kzg; use slot_clock::SlotClock; use std::fmt; @@ -17,7 +17,7 @@ use task_executor::TaskExecutor; use tracing::{debug, error, info_span, Instrument}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ - BlobSidecarList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, Hash256, + BlobSidecarList, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, }; @@ -32,6 +32,7 @@ use crate::data_column_verification::{ use crate::metrics::{ KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, }; +use crate::observed_data_sidecars::ObservationStrategy; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; @@ -73,6 +74,7 @@ pub struct DataAvailabilityChecker { availability_cache: Arc>, slot_clock: T::SlotClock, kzg: Arc, + custody_context: Arc, spec: Arc, } @@ -110,17 +112,28 @@ impl DataAvailabilityChecker { slot_clock: T::SlotClock, kzg: Arc, store: BeaconStore, + custody_context: Arc, 
spec: Arc, ) -> Result { - let inner = DataAvailabilityCheckerInner::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?; + let inner = DataAvailabilityCheckerInner::new( + OVERFLOW_LRU_CAPACITY, + store, + custody_context.clone(), + spec.clone(), + )?; Ok(Self { availability_cache: Arc::new(inner), slot_clock, kzg, + custody_context, spec, }) } + pub fn custody_context(&self) -> Arc { + self.custody_context.clone() + } + /// Checks if the block root is currenlty in the availability cache awaiting import because /// of missing components. pub fn get_execution_valid_block( @@ -155,6 +168,21 @@ impl DataAvailabilityChecker { }) } + /// Check if the exact data column is in the availability cache. + pub fn is_data_column_cached( + &self, + block_root: &Hash256, + data_column: &DataColumnSidecar, + ) -> bool { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.is_some_and(|components| { + let cached_column_opt = components.get_cached_data_column(data_column.index); + cached_column_opt.is_some_and(|cached| *cached == *data_column) + }) + }) + } + /// Get a blob from the availability cache. pub fn get_blob( &self, @@ -206,8 +234,9 @@ impl DataAvailabilityChecker { custody_columns: DataColumnSidecarList, ) -> Result, AvailabilityCheckError> { // Attributes fault to the specific peer that sent an invalid column - let kzg_verified_columns = KzgVerifiedDataColumn::from_batch(custody_columns, &self.kzg) - .map_err(AvailabilityCheckError::InvalidColumn)?; + let kzg_verified_columns = + KzgVerifiedDataColumn::from_batch_with_scoring(custody_columns, &self.kzg) + .map_err(AvailabilityCheckError::InvalidColumn)?; let verified_custody_columns = kzg_verified_columns .into_iter() @@ -218,65 +247,21 @@ impl DataAvailabilityChecker { .put_kzg_verified_data_columns(block_root, verified_custody_columns) } - /// Put a list of blobs received from the EL pool into the availability cache. 
- /// - /// This DOES NOT perform KZG verification because the KZG proofs should have been constructed - /// immediately prior to calling this function so they are assumed to be valid. - pub fn put_engine_blobs( - &self, - block_root: Hash256, - blobs: FixedBlobSidecarList, - ) -> Result, AvailabilityCheckError> { - let seen_timestamp = self - .slot_clock - .now_duration() - .ok_or(AvailabilityCheckError::SlotClockError)?; - self.availability_cache.put_kzg_verified_blobs( - block_root, - KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp), - ) - } - - /// Put a list of data columns computed from blobs received from the EL pool into the - /// availability cache. - /// - /// This DOES NOT perform KZG proof and inclusion proof verification because - /// - The KZG proofs should have been verified by the trusted EL. - /// - The KZG commitments inclusion proof should have been constructed immediately prior to - /// calling this function so they are assumed to be valid. - /// - /// This method is used if the EL already has the blobs and returns them via the `getBlobsV2` - /// engine method. - /// More details in [fetch_blobs.rs](https://github.com/sigp/lighthouse/blob/44f8add41ea2252769bb967864af95b3c13af8ca/beacon_node/beacon_chain/src/fetch_blobs.rs). - pub fn put_engine_data_columns( - &self, - block_root: Hash256, - data_columns: DataColumnSidecarList, - ) -> Result, AvailabilityCheckError> { - let kzg_verified_custody_columns = data_columns - .into_iter() - .map(|d| { - KzgVerifiedCustodyDataColumn::from_asserted_custody( - KzgVerifiedDataColumn::from_verified(d), - ) - }) - .collect::>(); - - self.availability_cache - .put_kzg_verified_data_columns(block_root, kzg_verified_custody_columns) - } - /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. 
/// /// This should only accept gossip verified blobs, so we should not have to worry about dupes. - pub fn put_gossip_blob( + pub fn put_gossip_verified_blobs< + I: IntoIterator>, + O: ObservationStrategy, + >( &self, - gossip_blob: GossipVerifiedBlob, + block_root: Hash256, + blobs: I, ) -> Result, AvailabilityCheckError> { self.availability_cache - .put_kzg_verified_blobs(gossip_blob.block_root(), vec![gossip_blob.into_inner()]) + .put_kzg_verified_blobs(block_root, blobs.into_iter().map(|b| b.into_inner())) } /// Check if we've cached other data columns for this block. If it satisfies the custody requirement and we also @@ -284,13 +269,15 @@ impl DataAvailabilityChecker { /// Otherwise cache the data column sidecar. /// /// This should only accept gossip verified data columns, so we should not have to worry about dupes. - #[allow(clippy::type_complexity)] - pub fn put_gossip_data_columns( + pub fn put_gossip_verified_data_columns< + O: ObservationStrategy, + I: IntoIterator>, + >( &self, block_root: Hash256, - gossip_data_columns: Vec>, + data_columns: I, ) -> Result, AvailabilityCheckError> { - let custody_columns = gossip_data_columns + let custody_columns = data_columns .into_iter() .map(|c| KzgVerifiedCustodyDataColumn::from_asserted_custody(c.into_inner())) .collect::>(); @@ -299,6 +286,17 @@ impl DataAvailabilityChecker { .put_kzg_verified_data_columns(block_root, custody_columns) } + pub fn put_kzg_verified_custody_data_columns< + I: IntoIterator>, + >( + &self, + block_root: Hash256, + custody_columns: I, + ) -> Result, AvailabilityCheckError> { + self.availability_cache + .put_kzg_verified_data_columns(block_root, custody_columns) + } + /// Check if we have all the blobs for a block. Returns `Availability` which has information /// about whether all components have been received or more are required. 
pub fn put_pending_executed_block( @@ -323,7 +321,6 @@ impl DataAvailabilityChecker { &self, block: RpcBlock, ) -> Result, AvailabilityCheckError> { - let custody_columns_count = block.custody_columns_count(); let (block_root, block, blobs, data_columns) = block.deconstruct(); if self.blobs_required_for_block(&block) { return if let Some(blob_list) = blobs { @@ -337,11 +334,7 @@ impl DataAvailabilityChecker { spec: self.spec.clone(), })) } else { - Ok(MaybeAvailableBlock::AvailabilityPending { - block_root, - block, - custody_columns_count, - }) + Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block }) }; } if self.data_columns_required_for_block(&block) { @@ -366,11 +359,7 @@ impl DataAvailabilityChecker { spec: self.spec.clone(), })) } else { - Ok(MaybeAvailableBlock::AvailabilityPending { - block_root, - block, - custody_columns_count, - }) + Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block }) }; } @@ -427,7 +416,6 @@ impl DataAvailabilityChecker { } for block in blocks { - let custody_columns_count = block.custody_columns_count(); let (block_root, block, blobs, data_columns) = block.deconstruct(); let maybe_available_block = if self.blobs_required_for_block(&block) { @@ -440,11 +428,7 @@ impl DataAvailabilityChecker { spec: self.spec.clone(), }) } else { - MaybeAvailableBlock::AvailabilityPending { - block_root, - block, - custody_columns_count, - } + MaybeAvailableBlock::AvailabilityPending { block_root, block } } } else if self.data_columns_required_for_block(&block) { if let Some(data_columns) = data_columns { @@ -458,11 +442,7 @@ impl DataAvailabilityChecker { spec: self.spec.clone(), }) } else { - MaybeAvailableBlock::AvailabilityPending { - block_root, - block, - custody_columns_count, - } + MaybeAvailableBlock::AvailabilityPending { block_root, block } } } else { MaybeAvailableBlock::Available(AvailableBlock { @@ -812,7 +792,6 @@ pub enum MaybeAvailableBlock { AvailabilityPending { block_root: Hash256, block: Arc>, - 
custody_columns_count: usize, }, } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 3478c183f3..deaea3eb24 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -8,6 +8,7 @@ use crate::block_verification_types::{ use crate::data_availability_checker::{Availability, AvailabilityCheckError}; use crate::data_column_verification::KzgVerifiedCustodyDataColumn; use crate::BeaconChainTypes; +use crate::CustodyContext; use lru::LruCache; use parking_lot::RwLock; use std::cmp::Ordering; @@ -158,6 +159,7 @@ impl PendingComponents { pub fn make_available( &mut self, spec: &Arc, + num_expected_columns: u64, recover: R, ) -> Result>, AvailabilityCheckError> where @@ -171,12 +173,11 @@ impl PendingComponents { }; let num_expected_blobs = block.num_blobs_expected(); - + let num_expected_columns = num_expected_columns as usize; let blob_data = if num_expected_blobs == 0 { Some(AvailableBlockData::NoData) } else if spec.is_peer_das_enabled_for_epoch(block.epoch()) { let num_received_columns = self.verified_data_columns.len(); - let num_expected_columns = block.custody_columns_count(); match num_received_columns.cmp(&num_expected_columns) { Ordering::Greater => { // Should never happen @@ -254,7 +255,6 @@ impl PendingComponents { block, import_data, payload_verification_outcome, - custody_columns_count: _, } = recover(block.clone())?; let available_block = AvailableBlock { @@ -308,19 +308,21 @@ impl PendingComponents { }) } - pub fn status_str(&self, block_epoch: Epoch, spec: &ChainSpec) -> String { + pub fn status_str( + &self, + block_epoch: Epoch, + num_expected_columns: Option, + spec: &ChainSpec, + ) -> String { let block_count = if self.executed_block.is_some() { 1 } else { 0 }; if spec.is_peer_das_enabled_for_epoch(block_epoch) { - 
let custody_columns_count = if let Some(block) = self.get_cached_block() { - &block.custody_columns_count().to_string() - } else { - "?" - }; format!( "block {} data_columns {}/{}", block_count, self.verified_data_columns.len(), - custody_columns_count, + num_expected_columns + .map(|c| c.to_string()) + .unwrap_or("?".into()) ) } else { let num_expected_blobs = if let Some(block) = self.get_cached_block() { @@ -346,6 +348,7 @@ pub struct DataAvailabilityCheckerInner { /// This cache holds a limited number of states in memory and reconstructs them /// from disk when necessary. This is necessary until we merge tree-states state_cache: StateLRUCache, + custody_context: Arc, spec: Arc, } @@ -362,11 +365,13 @@ impl DataAvailabilityCheckerInner { pub fn new( capacity: NonZeroUsize, beacon_store: BeaconStore, + custody_context: Arc, spec: Arc, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), + custody_context, spec, }) } @@ -470,13 +475,15 @@ impl DataAvailabilityCheckerInner { debug!( component = "blobs", ?block_root, - status = pending_components.status_str(epoch, &self.spec), + status = pending_components.status_str(epoch, None, &self.spec), "Component added to data availability checker" ); - if let Some(available_block) = pending_components.make_available(&self.spec, |block| { - self.state_cache.recover_pending_executed_block(block) - })? { + if let Some(available_block) = pending_components.make_available( + &self.spec, + self.custody_context.sampling_size(Some(epoch), &self.spec), + |block| self.state_cache.recover_pending_executed_block(block), + )? { // We keep the pending components in the availability cache during block import (#5845). write_lock.put(block_root, pending_components); drop(write_lock); @@ -519,16 +526,19 @@ impl DataAvailabilityCheckerInner { // Merge in the data columns. 
pending_components.merge_data_columns(kzg_verified_data_columns)?; + let num_expected_columns = self.custody_context.sampling_size(Some(epoch), &self.spec); debug!( component = "data_columns", ?block_root, - status = pending_components.status_str(epoch, &self.spec), + status = pending_components.status_str(epoch, Some(num_expected_columns), &self.spec), "Component added to data availability checker" ); - if let Some(available_block) = pending_components.make_available(&self.spec, |block| { - self.state_cache.recover_pending_executed_block(block) - })? { + if let Some(available_block) = + pending_components.make_available(&self.spec, num_expected_columns, |block| { + self.state_cache.recover_pending_executed_block(block) + })? + { // We keep the pending components in the availability cache during block import (#5845). write_lock.put(block_root, pending_components); drop(write_lock); @@ -612,17 +622,20 @@ impl DataAvailabilityCheckerInner { // Merge in the block. pending_components.merge_block(diet_executed_block); + let num_expected_columns = self.custody_context.sampling_size(Some(epoch), &self.spec); debug!( component = "block", ?block_root, - status = pending_components.status_str(epoch, &self.spec), + status = pending_components.status_str(epoch, Some(num_expected_columns), &self.spec), "Component added to data availability checker" ); // Check if we have all components and entire set is consistent. - if let Some(available_block) = pending_components.make_available(&self.spec, |block| { - self.state_cache.recover_pending_executed_block(block) - })? { + if let Some(available_block) = pending_components.make_available( + &self.spec, + self.custody_context.sampling_size(Some(epoch), &self.spec), + |block| self.state_cache.recover_pending_executed_block(block), + )? { // We keep the pending components in the availability cache during block import (#5845). 
write_lock.put(block_root, pending_components); drop(write_lock); @@ -686,7 +699,6 @@ mod test { block_verification::PayloadVerificationOutcome, block_verification_types::{AsBlock, BlockImportData}, data_availability_checker::STATE_LRU_CAPACITY, - eth1_finalization_cache::Eth1FinalizationData, test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, }; use fork_choice::PayloadVerificationStatus; @@ -700,7 +712,6 @@ mod test { use types::{ExecPayload, MinimalEthSpec}; const LOW_VALIDATOR_COUNT: usize = 32; - const DEFAULT_TEST_CUSTODY_COLUMN_COUNT: usize = 8; fn get_store_with_spec( db_path: &TempDir, @@ -797,11 +808,6 @@ mod test { .expect("should get block") .expect("should have block"); - let parent_eth1_finalization_data = Eth1FinalizationData { - eth1_data: parent_block.message().body().eth1_data().clone(), - eth1_deposit_index: 0, - }; - let (signed_beacon_block_hash, (block, maybe_blobs), state) = harness .add_block_at_slot(target_slot, parent_state) .await @@ -848,7 +854,6 @@ mod test { block_root, state, parent_block, - parent_eth1_finalization_data, consensus_context, }; @@ -861,7 +866,6 @@ mod test { block, import_data, payload_verification_outcome, - custody_columns_count: DEFAULT_TEST_CUSTODY_COLUMN_COUNT, }; (availability_pending_block, gossip_verified_blobs) @@ -888,9 +892,15 @@ mod test { let spec = harness.spec.clone(); let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); + let custody_context = Arc::new(CustodyContext::new(false)); let cache = Arc::new( - DataAvailabilityCheckerInner::::new(capacity_non_zero, test_store, spec.clone()) - .expect("should create cache"), + DataAvailabilityCheckerInner::::new( + capacity_non_zero, + test_store, + custody_context, + spec.clone(), + ) + .expect("should create cache"), ); (harness, cache, chain_db_path) } @@ -1141,7 +1151,6 @@ mod test { mod pending_components_tests { use super::*; use crate::block_verification_types::BlockImportData; - use 
crate::eth1_finalization_cache::Eth1FinalizationData; use crate::test_utils::{generate_rand_block_and_blobs, test_spec, NumBlobs}; use crate::PayloadVerificationOutcome; use fork_choice::PayloadVerificationStatus; @@ -1229,18 +1238,12 @@ mod pending_components_tests { block_root: Default::default(), state: BeaconState::new(0, Default::default(), &ChainSpec::minimal()), parent_block: dummy_parent, - parent_eth1_finalization_data: Eth1FinalizationData { - eth1_data: Default::default(), - eth1_deposit_index: 0, - }, consensus_context: ConsensusContext::new(Slot::new(0)), }, payload_verification_outcome: PayloadVerificationOutcome { payload_verification_status: PayloadVerificationStatus::Verified, is_valid_merge_transition_block: false, }, - // Default custody columns count, doesn't matter here - custody_columns_count: 8, }; (block.into(), blobs, invalid_blobs) } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 5fe674f30c..f16e138383 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -2,7 +2,6 @@ use crate::block_verification_types::AsBlock; use crate::{ block_verification_types::BlockImportData, data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY_NON_ZERO}, - eth1_finalization_cache::Eth1FinalizationData, AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, }; use lru::LruCache; @@ -21,10 +20,8 @@ pub struct DietAvailabilityPendingExecutedBlock { block: Arc>, state_root: Hash256, parent_block: SignedBeaconBlock>, - parent_eth1_finalization_data: Eth1FinalizationData, consensus_context: OnDiskConsensusContext, payload_verification_outcome: PayloadVerificationOutcome, - custody_columns_count: usize, } /// just implementing the same methods as `AvailabilityPendingExecutedBlock` 
@@ -54,10 +51,6 @@ impl DietAvailabilityPendingExecutedBlock { .unwrap_or_default() } - pub fn custody_columns_count(&self) -> usize { - self.custody_columns_count - } - /// Returns the epoch corresponding to `self.slot()`. pub fn epoch(&self) -> Epoch { self.block.slot().epoch(E::slots_per_epoch()) @@ -102,12 +95,10 @@ impl StateLRUCache { block: executed_block.block, state_root, parent_block: executed_block.import_data.parent_block, - parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, consensus_context: OnDiskConsensusContext::from_consensus_context( executed_block.import_data.consensus_context, ), payload_verification_outcome: executed_block.payload_verification_outcome, - custody_columns_count: executed_block.custody_columns_count, } } @@ -131,13 +122,11 @@ impl StateLRUCache { block_root, state, parent_block: diet_executed_block.parent_block, - parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data, consensus_context: diet_executed_block .consensus_context .into_consensus_context(), }, payload_verification_outcome: diet_executed_block.payload_verification_outcome, - custody_columns_count: diet_executed_block.custody_columns_count, }) } @@ -219,12 +208,10 @@ impl From> block: value.block, state_root: value.import_data.state.canonical_root().unwrap(), parent_block: value.import_data.parent_block, - parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, consensus_context: OnDiskConsensusContext::from_consensus_context( value.import_data.consensus_context, ), payload_verification_outcome: value.payload_verification_outcome, - custody_columns_count: value.custody_columns_count, } } } diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index b43b259cf6..3009522bf6 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ 
-129,6 +129,10 @@ pub enum GossipDataColumnError { slot: Slot, index: ColumnIndex, }, + /// A column has already been processed from non-gossip source and have not yet been seen on + /// the gossip network. + /// This column should be accepted and forwarded over gossip. + PriorKnownUnpublished, /// Data column index must be between 0 and `NUMBER_OF_COLUMNS` (exclusive). /// /// ## Peer scoring @@ -181,6 +185,16 @@ pub struct GossipVerifiedDataColumn, } +impl Clone for GossipVerifiedDataColumn { + fn clone(&self) -> Self { + Self { + block_root: self.block_root, + data_column: self.data_column.clone(), + _phantom: PhantomData, + } + } +} + impl GossipVerifiedDataColumn { pub fn new( column_sidecar: Arc>, @@ -200,6 +214,15 @@ impl GossipVerifiedDataColumn ) } + /// Create a `GossipVerifiedDataColumn` from `DataColumnSidecar` for testing ONLY. + pub fn __new_for_testing(column_sidecar: Arc>) -> Self { + Self { + block_root: column_sidecar.block_root(), + data_column: KzgVerifiedDataColumn::__new_for_testing(column_sidecar), + _phantom: Default::default(), + } + } + pub fn as_data_column(&self) -> &DataColumnSidecar { self.data_column.as_data_column() } @@ -243,17 +266,25 @@ impl KzgVerifiedDataColumn { verify_kzg_for_data_column(data_column, kzg) } - /// Create a `KzgVerifiedDataColumn` from `data_column` that are already KZG verified. - /// - /// This should be used with caution, as used incorrectly it could result in KZG verification - /// being skipped and invalid data_columns being deemed valid. - pub fn from_verified(data_column: Arc>) -> Self { + /// Create a `KzgVerifiedDataColumn` from `DataColumnSidecar` for testing ONLY. 
+ pub(crate) fn __new_for_testing(data_column: Arc>) -> Self { Self { data: data_column } } pub fn from_batch( data_columns: Vec>>, kzg: &Kzg, + ) -> Result, KzgError> { + verify_kzg_for_data_column_list(data_columns.iter(), kzg)?; + Ok(data_columns + .into_iter() + .map(|column| Self { data: column }) + .collect()) + } + + pub fn from_batch_with_scoring( + data_columns: Vec>>, + kzg: &Kzg, ) -> Result, Vec<(ColumnIndex, KzgError)>> { verify_kzg_for_data_column_list_with_scoring(data_columns.iter(), kzg)?; Ok(data_columns @@ -444,6 +475,23 @@ pub fn validate_data_column_sidecar_for_gossip( &chain.spec, )?; - let proposers = state.get_beacon_proposer_indices(&chain.spec)?; + let epoch = state.current_epoch(); + let proposers = state.get_beacon_proposer_indices(epoch, &chain.spec)?; // Prime the proposer shuffling cache with the newly-learned value. Ok::<_, GossipDataColumnError>(EpochBlockProposers { epoch: column_epoch, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 2e6de463cc..b6db3fa84f 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -3,7 +3,6 @@ use crate::beacon_block_streamer::Error as BlockStreamerError; use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; use crate::data_availability_checker::AvailabilityCheckError; -use crate::eth1_chain::Error as Eth1ChainError; use crate::migrate::PruningError; use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; @@ -271,7 +270,6 @@ pub enum BlockProductionError { BlockProcessingError(BlockProcessingError), EpochCacheError(EpochCacheError), ForkChoiceError(ForkChoiceError), - Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), StateAdvanceError(StateAdvanceError), OpPoolError(OpPoolError), @@ -307,7 +305,6 @@ pub enum BlockProductionError { 
easy_from_to!(BlockProcessingError, BlockProductionError); easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); -easy_from_to!(Eth1ChainError, BlockProductionError); easy_from_to!(StateAdvanceError, BlockProductionError); easy_from_to!(ForkChoiceError, BlockProductionError); easy_from_to!(EpochCacheError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs deleted file mode 100644 index 8a79bff4c7..0000000000 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ /dev/null @@ -1,1208 +0,0 @@ -use crate::metrics; -use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; -use eth2::lighthouse::Eth1SyncStatusData; -use ethereum_hashing::hash; -use int_to_bytes::int_to_bytes32; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_block_processing::get_new_eth1_data; -use std::cmp::Ordering; -use std::collections::HashMap; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; -use store::{DBColumn, Error as StoreError, StoreItem}; -use task_executor::TaskExecutor; -use tracing::{debug, error, trace}; -use types::{ - BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, -}; - -type BlockNumber = u64; -type Eth1DataVoteCount = HashMap<(Eth1Data, BlockNumber), u64>; - -/// We will declare ourself synced with the Eth1 chain, even if we are this many blocks behind. -/// -/// This number (8) was chosen somewhat arbitrarily. -const ETH1_SYNC_TOLERANCE: u64 = 8; - -#[derive(Debug)] -pub enum Error { - /// Unable to return an Eth1Data for the given epoch. - EpochUnavailable, - /// An error from the backend service (e.g., the web3 data fetcher). - BackendError(String), - /// The deposit index of the state is higher than the deposit contract. This is a critical - /// consensus error. 
- DepositIndexTooHigh, - /// The current state was unable to return the root for the state at the start of the eth1 - /// voting period. - UnableToGetPreviousStateRoot(BeaconStateError), - /// The state required to find the previous eth1 block was not found in the store. - PreviousStateNotInDB(Hash256), - /// There was an error accessing an object in the database. - StoreError(StoreError), - /// The eth1 head block at the start of the eth1 voting period is unknown. - /// - /// The eth1 caches are likely stale. - UnknownVotingPeriodHead, - /// The block that was previously voted into the state is unknown. - /// - /// The eth1 caches are stale, or a junk value was voted into the chain. - UnknownPreviousEth1BlockHash, - /// An arithmetic error occurred. - ArithError(safe_arith::ArithError), -} - -impl From for Error { - fn from(e: safe_arith::ArithError) -> Self { - Self::ArithError(e) - } -} - -/// Returns an `Eth1SyncStatusData` given some parameters: -/// -/// - `latest_cached_block`: The latest eth1 block in our cache, if any. -/// - `head_block`: The block at the very head of our eth1 node (ignoring follow distance, etc). -/// - `genesis_time`: beacon chain genesis time. -/// - `current_slot`: current beacon chain slot. -/// - `spec`: current beacon chain specification. -fn get_sync_status( - latest_cached_block: Option<&Eth1Block>, - head_block: Option<&Eth1Block>, - genesis_time: u64, - current_slot: Option, - spec: &ChainSpec, -) -> Option { - let eth1_follow_distance_seconds = spec - .seconds_per_eth1_block - .saturating_mul(spec.eth1_follow_distance); - - // The voting target timestamp needs to be special-cased when we're before - // genesis (as defined by `current_slot == None`). - // - // For the sake of this status, when prior to genesis we want to invent some voting periods - // that are *before* genesis, so that we can indicate to users that we're actually adequately - // cached for where they are in time. 
- let voting_target_timestamp = if let Some(current_slot) = current_slot { - let period = E::SlotsPerEth1VotingPeriod::to_u64(); - let voting_period_start_slot = (current_slot / period) * period; - - let period_start = slot_start_seconds( - genesis_time, - spec.seconds_per_slot, - voting_period_start_slot, - ); - - period_start.saturating_sub(eth1_follow_distance_seconds) - } else { - // The number of seconds in an eth1 voting period. - let voting_period_duration = - E::slots_per_eth1_voting_period() as u64 * spec.seconds_per_slot; - - let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs(); - - // The number of seconds between now and genesis. - let seconds_till_genesis = genesis_time.saturating_sub(now); - - // Determine how many voting periods are contained in distance between - // now and genesis, rounding up. - let voting_periods_past = seconds_till_genesis.div_ceil(voting_period_duration); - - // Return the start time of the current voting period*. - // - // *: This voting period doesn't *actually* exist, we're just using it to - // give useful logs prior to genesis. - genesis_time - .saturating_sub(voting_periods_past * voting_period_duration) - .saturating_sub(eth1_follow_distance_seconds) - }; - - let latest_cached_block_number = latest_cached_block.map(|b| b.number); - let latest_cached_block_timestamp = latest_cached_block.map(|b| b.timestamp); - let head_block_number = head_block.map(|b| b.number); - let head_block_timestamp = head_block.map(|b| b.timestamp); - - let eth1_node_sync_status_percentage = if let Some(head_block) = head_block { - let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs(); - let head_age = now.saturating_sub(head_block.timestamp); - - if head_age < ETH1_SYNC_TOLERANCE * spec.seconds_per_eth1_block { - // Always indicate we are fully synced if it's within the sync threshold. 
- 100.0 - } else { - let blocks_behind = head_age - .checked_div(spec.seconds_per_eth1_block) - .unwrap_or(0); - - let part = f64::from(head_block.number as u32); - let whole = f64::from(head_block.number.saturating_add(blocks_behind) as u32); - - if whole > 0.0 { - (part / whole) * 100.0 - } else { - // Avoids a divide-by-zero. - 0.0 - } - } - } else { - // Always return 0% synced if the head block of the eth1 chain is unknown. - 0.0 - }; - - // Lighthouse is "cached and ready" when it has cached enough blocks to cover the start of the - // current voting period. - let lighthouse_is_cached_and_ready = - latest_cached_block_timestamp.is_some_and(|t| t >= voting_target_timestamp); - - Some(Eth1SyncStatusData { - head_block_number, - head_block_timestamp, - latest_cached_block_number, - latest_cached_block_timestamp, - voting_target_timestamp, - eth1_node_sync_status_percentage, - lighthouse_is_cached_and_ready, - }) -} - -#[derive(Encode, Decode, Clone)] -pub struct SszEth1 { - pub use_dummy_backend: bool, - pub backend_bytes: Vec, -} - -impl StoreItem for SszEth1 { - fn db_column() -> DBColumn { - DBColumn::Eth1Cache - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } -} - -/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. -pub struct Eth1Chain -where - T: Eth1ChainBackend, - E: EthSpec, -{ - backend: T, - /// When `true`, the backend will be ignored and dummy data from the 2019 Canada interop method - /// will be used instead. 
- use_dummy_backend: bool, - _phantom: PhantomData, -} - -impl Eth1Chain -where - T: Eth1ChainBackend, - E: EthSpec, -{ - pub fn new(backend: T) -> Self { - Self { - backend, - use_dummy_backend: false, - _phantom: PhantomData, - } - } - - pub fn new_dummy(backend: T) -> Self { - Self { - use_dummy_backend: true, - ..Self::new(backend) - } - } - - /// Returns `true` if the "dummy" backend is being used. - pub fn is_dummy_backend(&self) -> bool { - self.use_dummy_backend - } - - /// Returns the `Eth1Data` that should be included in a block being produced for the given - /// `state`. - pub fn eth1_data_for_block_production( - &self, - state: &BeaconState, - spec: &ChainSpec, - ) -> Result { - if self.use_dummy_backend { - let dummy_backend: DummyEth1ChainBackend = DummyEth1ChainBackend::default(); - dummy_backend.eth1_data(state, spec) - } else { - self.backend.eth1_data(state, spec) - } - } - - /// Returns a list of `Deposits` that may be included in a block. - /// - /// Including all of the returned `Deposits` in a block should _not_ cause it to become - /// invalid (i.e., this function should respect the maximum). - /// - /// `eth1_data_vote` is the `Eth1Data` that the block producer would include in their - /// block. This vote may change the `state.eth1_data` value, which would change the deposit - /// count and therefore change the output of this function. - pub fn deposits_for_block_inclusion( - &self, - state: &BeaconState, - eth1_data_vote: &Eth1Data, - spec: &ChainSpec, - ) -> Result, Error> { - if self.use_dummy_backend { - let dummy_backend: DummyEth1ChainBackend = DummyEth1ChainBackend::default(); - dummy_backend.queued_deposits(state, eth1_data_vote, spec) - } else { - self.backend.queued_deposits(state, eth1_data_vote, spec) - } - } - - /// Returns a status indicating how synced our caches are with the eth1 chain. 
- pub fn sync_status( - &self, - genesis_time: u64, - current_slot: Option, - spec: &ChainSpec, - ) -> Option { - get_sync_status::( - self.backend.latest_cached_block().as_ref(), - self.backend.head_block().as_ref(), - genesis_time, - current_slot, - spec, - ) - } - - /// Instantiate `Eth1Chain` from a persisted `SszEth1`. - /// - /// The `Eth1Chain` will have the same caches as the persisted `SszEth1`. - pub fn from_ssz_container( - ssz_container: &SszEth1, - config: Eth1Config, - spec: Arc, - ) -> Result { - let backend = Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, spec)?; - Ok(Self { - use_dummy_backend: ssz_container.use_dummy_backend, - backend, - _phantom: PhantomData, - }) - } - - /// Return a `SszEth1` containing the state of `Eth1Chain`. - pub fn as_ssz_container(&self) -> SszEth1 { - SszEth1 { - use_dummy_backend: self.use_dummy_backend, - backend_bytes: self.backend.as_bytes(), - } - } - - /// Set in motion the finalization of `Eth1Data`. This method is called during block import - /// so it should be fast. - pub fn finalize_eth1_data(&self, eth1_data: Eth1Data) { - self.backend.finalize_eth1_data(eth1_data); - } - - /// Consumes `self`, returning the backend. - pub fn into_backend(self) -> T { - self.backend - } -} - -pub trait Eth1ChainBackend: Sized + Send + Sync { - /// Returns the `Eth1Data` that should be included in a block being produced for the given - /// `state`. - fn eth1_data(&self, beacon_state: &BeaconState, spec: &ChainSpec) - -> Result; - - /// Returns all `Deposits` between `state.eth1_deposit_index` and - /// `state.eth1_data.deposit_count`. - /// - /// # Note: - /// - /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may - /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high. 
- fn queued_deposits( - &self, - beacon_state: &BeaconState, - eth1_data_vote: &Eth1Data, - spec: &ChainSpec, - ) -> Result, Error>; - - /// Returns the latest block stored in the cache. Used to obtain an idea of how up-to-date the - /// beacon node eth1 cache is. - fn latest_cached_block(&self) -> Option; - - /// Set in motion the finalization of `Eth1Data`. This method is called during block import - /// so it should be fast. - fn finalize_eth1_data(&self, eth1_data: Eth1Data); - - /// Returns the block at the head of the chain (ignoring follow distance, etc). Used to obtain - /// an idea of how up-to-date the remote eth1 node is. - fn head_block(&self) -> Option; - - /// Encode the `Eth1ChainBackend` instance to bytes. - fn as_bytes(&self) -> Vec; - - /// Create a `Eth1ChainBackend` instance given encoded bytes. - fn from_bytes(bytes: &[u8], config: Eth1Config, spec: Arc) -> Result; -} - -/// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data. -/// -/// Never creates deposits, therefore the validator set is static. -/// -/// This was used in the 2019 Canada interop workshops. -pub struct DummyEth1ChainBackend(PhantomData); - -impl Eth1ChainBackend for DummyEth1ChainBackend { - /// Produce some deterministic junk based upon the current epoch. 
- fn eth1_data(&self, state: &BeaconState, _spec: &ChainSpec) -> Result { - // [New in Electra:EIP6110] - if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { - if state.eth1_deposit_index() == deposit_requests_start_index { - return Ok(state.eth1_data().clone()); - } - } - let current_epoch = state.current_epoch(); - let slots_per_voting_period = E::slots_per_eth1_voting_period() as u64; - let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; - - let deposit_root = hash(&int_to_bytes32(current_voting_period)); - let block_hash = hash(&deposit_root); - - Ok(Eth1Data { - deposit_root: Hash256::from_slice(&deposit_root), - deposit_count: state.eth1_deposit_index(), - block_hash: Hash256::from_slice(&block_hash), - }) - } - - /// The dummy back-end never produces deposits. - fn queued_deposits( - &self, - _: &BeaconState, - _: &Eth1Data, - _: &ChainSpec, - ) -> Result, Error> { - Ok(vec![]) - } - - fn latest_cached_block(&self) -> Option { - None - } - - fn finalize_eth1_data(&self, _eth1_data: Eth1Data) {} - - fn head_block(&self) -> Option { - None - } - - /// Return empty Vec for dummy backend. - fn as_bytes(&self) -> Vec { - Vec::new() - } - - /// Create dummy eth1 backend. - fn from_bytes( - _bytes: &[u8], - _config: Eth1Config, - _spec: Arc, - ) -> Result { - Ok(Self(PhantomData)) - } -} - -impl Default for DummyEth1ChainBackend { - fn default() -> Self { - Self(PhantomData) - } -} - -/// Maintains a cache of eth1 blocks and deposits and provides functions to allow block producers -/// to include new deposits and vote on `Eth1Data`. -/// -/// The `core` connects to some external eth1 client (e.g., Parity/Geth) and polls it for -/// information. -#[derive(Clone)] -pub struct CachingEth1Backend { - pub core: HttpService, - _phantom: PhantomData, -} - -impl CachingEth1Backend { - /// Instantiates `self` with empty caches. 
- /// - /// Does not connect to the eth1 node or start any tasks to keep the cache updated. - pub fn new(config: Eth1Config, spec: Arc) -> Result { - Ok(Self { - core: HttpService::new(config, spec) - .map_err(|e| format!("Failed to create eth1 http service: {:?}", e))?, - _phantom: PhantomData, - }) - } - - /// Starts the routine which connects to the external eth1 node and updates the caches. - pub fn start(&self, handle: TaskExecutor) { - HttpService::auto_update(self.core.clone(), handle); - } - - /// Instantiates `self` from an existing service. - pub fn from_service(service: HttpService) -> Self { - Self { - core: service, - _phantom: PhantomData, - } - } -} - -impl Eth1ChainBackend for CachingEth1Backend { - fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { - // [New in Electra:EIP6110] - if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { - if state.eth1_deposit_index() == deposit_requests_start_index { - return Ok(state.eth1_data().clone()); - } - } - let period = E::SlotsPerEth1VotingPeriod::to_u64(); - let voting_period_start_slot = (state.slot() / period) * period; - let voting_period_start_seconds = slot_start_seconds( - state.genesis_time(), - spec.seconds_per_slot, - voting_period_start_slot, - ); - - let votes_to_consider = { - let blocks = self.core.blocks().read(); - get_votes_to_consider(blocks.iter(), voting_period_start_seconds, spec) - }; - - trace!( - votes_to_consider = votes_to_consider.len(), - "Found eth1 data votes_to_consider" - ); - let valid_votes = collect_valid_votes(state, &votes_to_consider); - - let eth1_data = if let Some(eth1_data) = find_winning_vote(valid_votes) { - eth1_data - } else { - // In this case, there are no valid votes available. - // - // Here we choose the eth1_data corresponding to the latest block in our voting window. - // If no votes exist, choose `state.eth1_data` as default vote. 
- votes_to_consider - .iter() - .max_by_key(|(_, block_number)| *block_number) - .map(|vote| { - let vote = vote.0.clone(); - debug!( - outcome = "Casting vote corresponding to last candidate eth1 block", - ?vote, - "No valid eth1_data votes" - ); - vote - }) - .unwrap_or_else(|| { - let vote = state.eth1_data().clone(); - error!( - lowest_block_number = self.core.lowest_block_number(), - earliest_block_timestamp = self.core.earliest_block_timestamp(), - genesis_time = state.genesis_time(), - outcome = "casting `state.eth1_data` as eth1 vote", - "No valid eth1_data votes, `votes_to_consider` empty" - ); - metrics::inc_counter(&metrics::DEFAULT_ETH1_VOTES); - vote - }) - }; - - debug!( - deposit_root = ?eth1_data.deposit_root, - deposit_count = eth1_data.deposit_count, - block_hash = ?eth1_data.block_hash, - "Produced vote for eth1 chain" - ); - - Ok(eth1_data) - } - - fn queued_deposits( - &self, - state: &BeaconState, - eth1_data_vote: &Eth1Data, - _spec: &ChainSpec, - ) -> Result, Error> { - let deposit_index = state.eth1_deposit_index(); - let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote)? 
{ - new_eth1_data.deposit_count - } else { - state.eth1_data().deposit_count - }; - - // [New in Electra:EIP6110] - let deposit_index_limit = - if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { - std::cmp::min(deposit_count, deposit_requests_start_index) - } else { - deposit_count - }; - - match deposit_index.cmp(&deposit_index_limit) { - Ordering::Greater => Err(Error::DepositIndexTooHigh), - Ordering::Equal => Ok(vec![]), - Ordering::Less => { - let next = deposit_index; - let last = std::cmp::min(deposit_index_limit, next + E::MaxDeposits::to_u64()); - - self.core - .deposits() - .read() - .cache - .get_deposits(next, last, deposit_count) - .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e))) - .map(|(_deposit_root, deposits)| deposits) - } - } - } - - fn latest_cached_block(&self) -> Option { - self.core.latest_cached_block() - } - - /// This only writes the eth1_data to a temporary cache so that the service - /// thread can later do the actual finalizing of the deposit tree. - fn finalize_eth1_data(&self, eth1_data: Eth1Data) { - self.core.set_to_finalize(Some(eth1_data)); - } - - fn head_block(&self) -> Option { - self.core.head_block() - } - - /// Return encoded byte representation of the block and deposit caches. - fn as_bytes(&self) -> Vec { - self.core.as_bytes() - } - - /// Recover the cached backend from encoded bytes. - fn from_bytes(bytes: &[u8], config: Eth1Config, spec: Arc) -> Result { - let inner = HttpService::from_bytes(bytes, config, spec)?; - Ok(Self { - core: inner, - _phantom: PhantomData, - }) - } -} - -/// Get all votes from eth1 blocks which are in the list of candidate blocks for the -/// current eth1 voting period. -/// -/// Returns a hashmap of `Eth1Data` to its associated eth1 `block_number`. 
-fn get_votes_to_consider<'a, I>( - blocks: I, - voting_period_start_seconds: u64, - spec: &ChainSpec, -) -> HashMap -where - I: DoubleEndedIterator + Clone, -{ - blocks - .rev() - .skip_while(|eth1_block| !is_candidate_block(eth1_block, voting_period_start_seconds, spec)) - .take_while(|eth1_block| is_candidate_block(eth1_block, voting_period_start_seconds, spec)) - .filter_map(|eth1_block| { - eth1_block - .clone() - .eth1_data() - .map(|eth1_data| (eth1_data, eth1_block.number)) - }) - .collect() -} - -/// Collect all valid votes that are cast during the current voting period. -/// Return hashmap with count of each vote cast. -fn collect_valid_votes( - state: &BeaconState, - votes_to_consider: &HashMap, -) -> Eth1DataVoteCount { - let mut valid_votes = HashMap::new(); - state - .eth1_data_votes() - .iter() - .filter_map(|vote| { - votes_to_consider - .get(vote) - .map(|block_num| (vote.clone(), *block_num)) - }) - .for_each(|(eth1_data, block_number)| { - valid_votes - .entry((eth1_data, block_number)) - .and_modify(|count| *count += 1) - .or_insert(1_u64); - }); - valid_votes -} - -/// Selects the winning vote from `valid_votes`. -fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option { - valid_votes - .iter() - .max_by_key(|((_eth1_data, block_number), vote_count)| (*vote_count, block_number)) - .map(|((eth1_data, _), _)| eth1_data.clone()) -} - -/// Returns the unix-epoch seconds at the start of the given `slot`. -fn slot_start_seconds(genesis_unix_seconds: u64, seconds_per_slot: u64, slot: Slot) -> u64 { - genesis_unix_seconds + slot.as_u64() * seconds_per_slot -} - -/// Returns a boolean denoting if a given `Eth1Block` is a candidate for `Eth1Data` calculation -/// at the timestamp `period_start`. -/// -/// Note: `period_start` needs to be atleast (`spec.seconds_per_eth1_block * spec.eth1_follow_distance * 2`) -/// for this function to return meaningful values. 
-fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) -> bool { - block.timestamp - <= period_start.saturating_sub(spec.seconds_per_eth1_block * spec.eth1_follow_distance) - && block.timestamp - >= period_start - .saturating_sub(spec.seconds_per_eth1_block * spec.eth1_follow_distance * 2) -} - -#[cfg(test)] -mod test { - use super::*; - use types::{DepositData, FixedBytesExtended, MinimalEthSpec, Signature}; - - type E = MinimalEthSpec; - - fn get_eth1_data(i: u64) -> Eth1Data { - Eth1Data { - block_hash: Hash256::from_low_u64_be(i), - deposit_root: Hash256::from_low_u64_be(u64::MAX - i), - deposit_count: i, - } - } - - fn get_voting_period_start_seconds(state: &BeaconState, spec: &ChainSpec) -> u64 { - let period = ::SlotsPerEth1VotingPeriod::to_u64(); - let voting_period_start_slot = (state.slot() / period) * period; - slot_start_seconds( - state.genesis_time(), - spec.seconds_per_slot, - voting_period_start_slot, - ) - } - - #[test] - fn slot_start_time() { - let zero_sec = 0; - assert_eq!(slot_start_seconds(100, zero_sec, Slot::new(2)), 100); - - let one_sec = 1; - assert_eq!(slot_start_seconds(100, one_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds(100, one_sec, Slot::new(1)), 101); - assert_eq!(slot_start_seconds(100, one_sec, Slot::new(2)), 102); - - let three_sec = 3; - assert_eq!(slot_start_seconds(100, three_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds(100, three_sec, Slot::new(1)), 103); - assert_eq!(slot_start_seconds(100, three_sec, Slot::new(2)), 106); - - let five_sec = 5; - assert_eq!(slot_start_seconds(100, five_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds(100, five_sec, Slot::new(1)), 105); - assert_eq!(slot_start_seconds(100, five_sec, Slot::new(2)), 110); - assert_eq!(slot_start_seconds(100, five_sec, Slot::new(3)), 115); - } - - fn get_eth1_block(timestamp: u64, number: u64) -> Eth1Block { - Eth1Block { - number, - timestamp, - hash: Hash256::from_low_u64_be(number), - deposit_root: 
Some(Hash256::from_low_u64_be(number)), - deposit_count: Some(number), - } - } - - mod eth1_chain_json_backend { - use super::*; - use eth1::DepositLog; - use logging::create_test_tracing_subscriber; - use types::{test_utils::generate_deterministic_keypair, MainnetEthSpec}; - - fn get_eth1_chain() -> Eth1Chain, E> { - create_test_tracing_subscriber(); - - let eth1_config = Eth1Config { - ..Eth1Config::default() - }; - - Eth1Chain::new( - CachingEth1Backend::new(eth1_config, Arc::new(MainnetEthSpec::default_spec())) - .unwrap(), - ) - } - - fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog { - let keypair = generate_deterministic_keypair(i as usize); - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials: Hash256::zero(), - amount: spec.max_effective_balance, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - - DepositLog { - deposit_data: deposit, - block_number: i, - index: i, - signature_is_valid: true, - } - } - - #[test] - fn deposits_empty_cache() { - let spec = &E::default_spec(); - - let eth1_chain = get_eth1_chain(); - - assert!( - !eth1_chain.use_dummy_backend, - "test should not use dummy backend" - ); - - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - *state.eth1_deposit_index_mut() = 0; - state.eth1_data_mut().deposit_count = 0; - - assert!( - eth1_chain - .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) - .is_ok(), - "should succeed if cache is empty but no deposits are required" - ); - - state.eth1_data_mut().deposit_count = 1; - - assert!( - eth1_chain - .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) - .is_err(), - "should fail to get deposits if required, but cache is empty" - ); - } - - #[test] - fn deposits_with_cache() { - let spec = &E::default_spec(); - - let eth1_chain = get_eth1_chain(); - let max_deposits = ::MaxDeposits::to_u64(); - - assert!( - 
!eth1_chain.use_dummy_backend, - "test should not use dummy backend" - ); - - let deposits: Vec<_> = (0..max_deposits + 2) - .map(|i| get_deposit_log(i, spec)) - .inspect(|log| { - eth1_chain - .backend - .core - .deposits() - .write() - .cache - .insert_log(log.clone()) - .expect("should insert log"); - }) - .collect(); - - assert_eq!( - eth1_chain.backend.core.deposits().write().cache.len(), - deposits.len(), - "cache should store all logs" - ); - - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - *state.eth1_deposit_index_mut() = 0; - state.eth1_data_mut().deposit_count = 0; - - assert!( - eth1_chain - .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) - .is_ok(), - "should succeed if no deposits are required" - ); - - (0..3).for_each(|initial_deposit_index| { - *state.eth1_deposit_index_mut() = initial_deposit_index as u64; - - (initial_deposit_index..deposits.len()).for_each(|i| { - state.eth1_data_mut().deposit_count = i as u64; - - let deposits_for_inclusion = eth1_chain - .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) - .unwrap_or_else(|_| panic!("should find deposit for {}", i)); - - let expected_len = - std::cmp::min(i - initial_deposit_index, max_deposits as usize); - - assert_eq!( - deposits_for_inclusion.len(), - expected_len, - "should find {} deposits", - expected_len - ); - - let deposit_data_for_inclusion: Vec<_> = deposits_for_inclusion - .into_iter() - .map(|deposit| deposit.data) - .collect(); - - let expected_deposit_data: Vec<_> = deposits[initial_deposit_index - ..std::cmp::min(initial_deposit_index + expected_len, deposits.len())] - .iter() - .map(|log| log.deposit_data.clone()) - .collect(); - - assert_eq!( - deposit_data_for_inclusion, expected_deposit_data, - "should find the correct deposits for {}", - i - ); - }); - }) - } - - #[test] - fn eth1_data_empty_cache() { - let spec = &E::default_spec(); - - let eth1_chain = get_eth1_chain(); - - assert!( - 
!eth1_chain.use_dummy_backend, - "test should not use dummy backend" - ); - - let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - let a = eth1_chain - .eth1_data_for_block_production(&state, spec) - .expect("should produce default eth1 data vote"); - assert_eq!( - a, - *state.eth1_data(), - "default vote should be same as state.eth1_data" - ); - } - - #[test] - fn default_vote() { - let spec = &E::default_spec(); - let slots_per_eth1_voting_period = ::SlotsPerEth1VotingPeriod::to_u64(); - let eth1_follow_distance = spec.eth1_follow_distance; - - let eth1_chain = get_eth1_chain(); - - assert!( - !eth1_chain.use_dummy_backend, - "test should not use dummy backend" - ); - - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - *state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10); - let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block; - let voting_period_start = get_voting_period_start_seconds(&state, spec); - let start_eth1_block = voting_period_start - follow_distance_seconds * 2; - let end_eth1_block = voting_period_start - follow_distance_seconds; - - // Populate blocks cache with candidate eth1 blocks - let blocks = (start_eth1_block..end_eth1_block) - .map(|i| get_eth1_block(i, i)) - .collect::>(); - - blocks.iter().for_each(|block| { - eth1_chain - .backend - .core - .blocks() - .write() - .insert_root_or_child(block.clone()) - .expect("should add blocks to cache"); - }); - - let vote = eth1_chain - .eth1_data_for_block_production(&state, spec) - .expect("should produce default eth1 data vote"); - - assert_eq!( - vote, - blocks - .last() - .expect("should have blocks") - .clone() - .eth1_data() - .expect("should have valid eth1 data"), - "default vote must correspond to last block in candidate blocks" - ); - } - } - - mod eth1_data_sets { - use super::*; - - #[test] - fn empty_cache() { - let spec = &E::default_spec(); - let state: BeaconState = BeaconState::new(0, 
get_eth1_data(0), spec); - - let blocks = []; - - assert_eq!( - get_votes_to_consider( - blocks.iter(), - get_voting_period_start_seconds(&state, spec), - spec, - ), - HashMap::new() - ); - } - - #[test] - fn ideal_scenario() { - let spec = E::default_spec(); - - let slots_per_eth1_voting_period = ::SlotsPerEth1VotingPeriod::to_u64(); - let eth1_follow_distance = spec.eth1_follow_distance; - - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); - *state.genesis_time_mut() = 0; - *state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10); - - let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block; - let voting_period_start = get_voting_period_start_seconds(&state, &spec); - let start_eth1_block = voting_period_start - follow_distance_seconds * 2; - let end_eth1_block = voting_period_start - follow_distance_seconds; - let blocks = (start_eth1_block..end_eth1_block) - .map(|i| get_eth1_block(i, i)) - .collect::>(); - - let votes_to_consider = - get_votes_to_consider(blocks.iter(), voting_period_start, &spec); - assert_eq!( - votes_to_consider.len() as u64, - end_eth1_block - start_eth1_block, - "all produced eth1 blocks should be in votes to consider" - ); - - (start_eth1_block..end_eth1_block) - .map(|i| get_eth1_block(i, i)) - .for_each(|eth1_block| { - assert_eq!( - eth1_block.number, - *votes_to_consider - .get(ð1_block.clone().eth1_data().unwrap()) - .expect("votes_to_consider should have expected block numbers") - ) - }); - } - } - - mod collect_valid_votes { - use super::*; - use types::List; - - fn get_eth1_data_vec(n: u64, block_number_offset: u64) -> Vec<(Eth1Data, BlockNumber)> { - (0..n) - .map(|i| (get_eth1_data(i), i + block_number_offset)) - .collect() - } - - macro_rules! 
assert_votes { - ($votes: expr, $expected: expr, $text: expr) => { - let expected: Vec<(Eth1Data, BlockNumber)> = $expected; - assert_eq!( - $votes.len(), - expected.len(), - "map should have the same number of elements" - ); - expected.iter().for_each(|(eth1_data, block_number)| { - $votes - .get(&(eth1_data.clone(), *block_number)) - .expect("should contain eth1 data"); - }) - }; - } - - #[test] - fn no_votes_in_state() { - let slots = ::SlotsPerEth1VotingPeriod::to_u64(); - let spec = &E::default_spec(); - let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - let votes_to_consider = get_eth1_data_vec(slots, 0); - - let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect()); - assert_eq!( - votes.len(), - 0, - "should not find any votes when state has no votes" - ); - } - - #[test] - fn distinct_votes_in_state() { - let slots = ::SlotsPerEth1VotingPeriod::to_u64(); - let spec = &E::default_spec(); - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - let votes_to_consider = get_eth1_data_vec(slots, 0); - - *state.eth1_data_votes_mut() = List::new( - votes_to_consider[0..slots as usize / 4] - .iter() - .map(|(eth1_data, _)| eth1_data) - .cloned() - .collect::>(), - ) - .unwrap(); - - let votes = - collect_valid_votes(&state, &votes_to_consider.clone().into_iter().collect()); - assert_votes!( - votes, - votes_to_consider[0..slots as usize / 4].to_vec(), - "should find as many votes as were in the state" - ); - } - - #[test] - fn duplicate_votes_in_state() { - let slots = ::SlotsPerEth1VotingPeriod::to_u64(); - let spec = &E::default_spec(); - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - let votes_to_consider = get_eth1_data_vec(slots, 0); - - let duplicate_eth1_data = votes_to_consider - .last() - .expect("should have some eth1 data") - .clone(); - - *state.eth1_data_votes_mut() = List::new( - vec![duplicate_eth1_data.clone(); 4] - .iter() - .map(|(eth1_data, _)| 
eth1_data) - .cloned() - .collect::>(), - ) - .unwrap(); - - let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect()); - assert_votes!( - votes, - // There should only be one value if there's a duplicate - vec![duplicate_eth1_data.clone()], - "should find as many votes as were in the state" - ); - - assert_eq!( - *votes - .get(&duplicate_eth1_data) - .expect("should contain vote"), - 4, - "should have four votes" - ); - } - } - - mod winning_vote { - use super::*; - - type Vote = ((Eth1Data, u64), u64); - - fn vote(block_number: u64, vote_count: u64) -> Vote { - ( - ( - Eth1Data { - deposit_root: Hash256::from_low_u64_be(block_number), - deposit_count: block_number, - block_hash: Hash256::from_low_u64_be(block_number), - }, - block_number, - ), - vote_count, - ) - } - - fn vote_data(vote: &Vote) -> Eth1Data { - (vote.0).0.clone() - } - - #[test] - fn no_votes() { - let no_votes = vec![vote(0, 0), vote(1, 0), vote(3, 0), vote(2, 0)]; - - assert_eq!( - // Favour the highest block number when there are no votes. - vote_data(&no_votes[2]), - find_winning_vote(no_votes.into_iter().collect()).expect("should find winner") - ); - } - - #[test] - fn equal_votes() { - let votes = vec![vote(0, 1), vote(1, 1), vote(3, 1), vote(2, 1)]; - - assert_eq!( - // Favour the highest block number when there are equal votes. - vote_data(&votes[2]), - find_winning_vote(votes.into_iter().collect()).expect("should find winner") - ); - } - - #[test] - fn some_votes() { - let votes = vec![vote(0, 0), vote(1, 1), vote(3, 1), vote(2, 2)]; - - assert_eq!( - // Favour the highest vote over the highest block number. - vote_data(&votes[3]), - find_winning_vote(votes.into_iter().collect()).expect("should find winner") - ); - } - - #[test] - fn tying_votes() { - let votes = vec![vote(0, 0), vote(1, 1), vote(2, 2), vote(3, 2)]; - - assert_eq!( - // Favour the highest block number for tying votes. 
- vote_data(&votes[3]), - find_winning_vote(votes.into_iter().collect()).expect("should find winner") - ); - } - - #[test] - fn all_tying_votes() { - let votes = vec![vote(3, 42), vote(2, 42), vote(1, 42), vote(0, 42)]; - - assert_eq!( - // Favour the highest block number for tying votes. - vote_data(&votes[0]), - find_winning_vote(votes.into_iter().collect()).expect("should find winner") - ); - } - } -} diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs deleted file mode 100644 index 8c3bb8c483..0000000000 --- a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs +++ /dev/null @@ -1,482 +0,0 @@ -use ssz_derive::{Decode, Encode}; -use std::cmp; -use std::collections::BTreeMap; -use tracing::debug; -use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root}; - -/// The default size of the cache. -/// The beacon chain only looks at the last 4 epochs for finalization. -/// Add 1 for current epoch and 4 earlier epochs. -pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5; - -/// These fields are named the same as the corresponding fields in the `BeaconState` -/// as this structure stores these values from the `BeaconState` at a `Checkpoint` -#[derive(Clone, Debug, PartialEq, Encode, Decode)] -pub struct Eth1FinalizationData { - pub eth1_data: Eth1Data, - pub eth1_deposit_index: u64, -} - -impl Eth1FinalizationData { - /// Ensures the deposit finalization conditions have been met. See: - /// https://eips.ethereum.org/EIPS/eip-4881#deposit-finalization-conditions - fn fully_imported(&self) -> bool { - self.eth1_deposit_index >= self.eth1_data.deposit_count - } -} - -/// Implements map from Checkpoint -> Eth1CacheData -pub struct CheckpointMap { - capacity: usize, - // There shouldn't be more than a couple of potential checkpoints at the same - // epoch. 
Searching through a vector for the matching Root should be faster - // than using another map from Root->Eth1CacheData - store: BTreeMap>, -} - -impl Default for CheckpointMap { - fn default() -> Self { - Self::new() - } -} - -/// Provides a map of `Eth1CacheData` referenced by `Checkpoint` -/// -/// ## Cache Queuing -/// -/// The cache keeps a maximum number of (`capacity`) epochs. Because there may be -/// forks at the epoch boundary, it's possible that there exists more than one -/// `Checkpoint` for the same `Epoch`. This cache will store all checkpoints for -/// a given `Epoch`. When adding data for a new `Checkpoint` would cause the number -/// of `Epoch`s stored to exceed `capacity`, the data for oldest `Epoch` is dropped -impl CheckpointMap { - pub fn new() -> Self { - CheckpointMap { - capacity: DEFAULT_ETH1_CACHE_SIZE, - store: BTreeMap::new(), - } - } - - pub fn with_capacity(capacity: usize) -> Self { - CheckpointMap { - capacity: cmp::max(1, capacity), - store: BTreeMap::new(), - } - } - - pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { - self.store - .entry(checkpoint.epoch) - .or_default() - .push((checkpoint.root, eth1_finalization_data)); - - // faster to reduce size after the fact than do pre-checking to see - // if the current data would increase the size of the BTreeMap - while self.store.len() > self.capacity { - let oldest_stored_epoch = self.store.keys().next().cloned().unwrap(); - self.store.remove(&oldest_stored_epoch); - } - } - - pub fn get(&self, checkpoint: &Checkpoint) -> Option<&Eth1FinalizationData> { - match self.store.get(&checkpoint.epoch) { - Some(vec) => { - for (root, data) in vec { - if *root == checkpoint.root { - return Some(data); - } - } - None - } - None => None, - } - } - - #[cfg(test)] - pub fn len(&self) -> usize { - self.store.len() - } -} - -/// This cache stores `Eth1CacheData` that could potentially be finalized within 4 -/// future epochs. 
-#[derive(Default)] -pub struct Eth1FinalizationCache { - by_checkpoint: CheckpointMap, - pending_eth1: BTreeMap, - last_finalized: Option, -} - -/// Provides a cache of `Eth1CacheData` at epoch boundaries. This is used to -/// finalize deposits when a new epoch is finalized. -/// -impl Eth1FinalizationCache { - pub fn with_capacity(capacity: usize) -> Self { - Eth1FinalizationCache { - by_checkpoint: CheckpointMap::with_capacity(capacity), - pending_eth1: BTreeMap::new(), - last_finalized: None, - } - } - - pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { - if !eth1_finalization_data.fully_imported() { - self.pending_eth1.insert( - eth1_finalization_data.eth1_data.deposit_count, - eth1_finalization_data.eth1_data.clone(), - ); - debug!( - eth1_data.deposit_count = eth1_finalization_data.eth1_data.deposit_count, - eth1_deposit_index = eth1_finalization_data.eth1_deposit_index, - "Eth1Cache: inserted pending eth1" - ); - } - self.by_checkpoint - .insert(checkpoint, eth1_finalization_data); - } - - pub fn finalize(&mut self, checkpoint: &Checkpoint) -> Option { - if let Some(eth1_finalized_data) = self.by_checkpoint.get(checkpoint) { - let finalized_deposit_index = eth1_finalized_data.eth1_deposit_index; - let mut result = None; - while let Some(pending_count) = self.pending_eth1.keys().next().cloned() { - if finalized_deposit_index >= pending_count { - result = self.pending_eth1.remove(&pending_count); - debug!( - pending_count, - finalized_deposit_index, "Eth1Cache: dropped pending eth1" - ); - } else { - break; - } - } - if eth1_finalized_data.fully_imported() { - result = Some(eth1_finalized_data.eth1_data.clone()) - } - if result.is_some() { - self.last_finalized = result; - } - self.last_finalized.clone() - } else { - debug!( - epoch = %checkpoint.epoch, - "Eth1Cache: cache miss" - ); - None - } - } - - #[cfg(test)] - pub fn by_checkpoint(&self) -> &CheckpointMap { - &self.by_checkpoint - } - - #[cfg(test)] - pub 
fn pending_eth1(&self) -> &BTreeMap { - &self.pending_eth1 - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use std::collections::HashMap; - - const SLOTS_PER_EPOCH: u64 = 32; - const MAX_DEPOSITS: u64 = 16; - const EPOCHS_PER_ETH1_VOTING_PERIOD: u64 = 64; - - fn eth1cache() -> Eth1FinalizationCache { - Eth1FinalizationCache::default() - } - - fn random_eth1_data(deposit_count: u64) -> Eth1Data { - Eth1Data { - deposit_root: Root::random(), - deposit_count, - block_hash: Root::random(), - } - } - - fn random_checkpoint(epoch: u64) -> Checkpoint { - Checkpoint { - epoch: epoch.into(), - root: Root::random(), - } - } - - fn random_checkpoints(n: usize) -> Vec { - let mut result = Vec::with_capacity(n); - for epoch in 0..n { - result.push(random_checkpoint(epoch as u64)) - } - result - } - - #[test] - fn fully_imported_deposits() { - let epochs = 16; - let deposits_imported = 128; - - let eth1data = random_eth1_data(deposits_imported); - let checkpoints = random_checkpoints(epochs as usize); - let mut eth1cache = eth1cache(); - - for epoch in 4..epochs { - assert_eq!( - eth1cache.by_checkpoint().len(), - cmp::min((epoch - 4) as usize, DEFAULT_ETH1_CACHE_SIZE), - "Unexpected cache size" - ); - - let checkpoint = checkpoints - .get(epoch as usize) - .expect("should get checkpoint"); - eth1cache.insert( - *checkpoint, - Eth1FinalizationData { - eth1_data: eth1data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - - let finalized_checkpoint = checkpoints - .get((epoch - 4) as usize) - .expect("should get finalized checkpoint"); - assert!( - eth1cache.pending_eth1().is_empty(), - "Deposits are fully imported so pending cache should be empty" - ); - if epoch < 8 { - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - None, - "Should have cache miss" - ); - } else { - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - Some(eth1data.clone()), - "Should have cache hit" - ) - } - } - } - - #[test] - fn partially_imported_deposits() { - let 
epochs = 16; - let initial_deposits_imported = 1024; - let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; - let full_import_epoch = 13; - let total_deposits = - initial_deposits_imported + deposits_imported_per_epoch * full_import_epoch; - - let eth1data = random_eth1_data(total_deposits); - let checkpoints = random_checkpoints(epochs as usize); - let mut eth1cache = eth1cache(); - - for epoch in 0..epochs { - assert_eq!( - eth1cache.by_checkpoint().len(), - cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), - "Unexpected cache size" - ); - - let checkpoint = checkpoints - .get(epoch as usize) - .expect("should get checkpoint"); - let deposits_imported = cmp::min( - total_deposits, - initial_deposits_imported + deposits_imported_per_epoch * epoch, - ); - eth1cache.insert( - *checkpoint, - Eth1FinalizationData { - eth1_data: eth1data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - - if epoch >= 4 { - let finalized_epoch = epoch - 4; - let finalized_checkpoint = checkpoints - .get(finalized_epoch as usize) - .expect("should get finalized checkpoint"); - if finalized_epoch < full_import_epoch { - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - None, - "Deposits not fully finalized so cache should return no Eth1Data", - ); - assert_eq!( - eth1cache.pending_eth1().len(), - 1, - "Deposits not fully finalized. Pending eth1 cache should have 1 entry" - ); - } else { - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - Some(eth1data.clone()), - "Deposits fully imported and finalized. Cache should return Eth1Data. finalized_deposits[{}]", - (initial_deposits_imported + deposits_imported_per_epoch * finalized_epoch), - ); - assert!( - eth1cache.pending_eth1().is_empty(), - "Deposits fully imported and finalized. 
Pending cache should be empty" - ); - } - } - } - } - - #[test] - fn fork_at_epoch_boundary() { - let epochs = 12; - let deposits_imported = 128; - - let eth1data = random_eth1_data(deposits_imported); - let checkpoints = random_checkpoints(epochs as usize); - let mut forks = HashMap::new(); - let mut eth1cache = eth1cache(); - - for epoch in 0..epochs { - assert_eq!( - eth1cache.by_checkpoint().len(), - cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), - "Unexpected cache size" - ); - - let checkpoint = checkpoints - .get(epoch as usize) - .expect("should get checkpoint"); - eth1cache.insert( - *checkpoint, - Eth1FinalizationData { - eth1_data: eth1data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - // lets put a fork at every third epoch - if epoch % 3 == 0 { - let fork = random_checkpoint(epoch); - eth1cache.insert( - fork, - Eth1FinalizationData { - eth1_data: eth1data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - forks.insert(epoch as usize, fork); - } - - assert!( - eth1cache.pending_eth1().is_empty(), - "Deposits are fully imported so pending cache should be empty" - ); - if epoch >= 4 { - let finalized_epoch = (epoch - 4) as usize; - let finalized_checkpoint = if finalized_epoch % 3 == 0 { - forks.get(&finalized_epoch).expect("should get fork") - } else { - checkpoints - .get(finalized_epoch) - .expect("should get checkpoint") - }; - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - Some(eth1data.clone()), - "Should have cache hit" - ); - if finalized_epoch >= 3 { - let dropped_epoch = finalized_epoch - 3; - if let Some(dropped_checkpoint) = forks.get(&dropped_epoch) { - // got checkpoint for an old fork that should no longer - // be in the cache because it is from too long ago - assert_eq!( - eth1cache.finalize(dropped_checkpoint), - None, - "Should have cache miss" - ); - } - } - } - } - } - - #[test] - fn massive_deposit_queue() { - // Simulating a situation where deposits don't get imported within an eth1 voting 
period - let eth1_voting_periods = 8; - let initial_deposits_imported = 1024; - let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; - let initial_deposit_queue = - deposits_imported_per_epoch * EPOCHS_PER_ETH1_VOTING_PERIOD * 2 + 32; - let new_deposits_per_voting_period = - EPOCHS_PER_ETH1_VOTING_PERIOD * deposits_imported_per_epoch / 2; - - let mut epoch_data = BTreeMap::new(); - let mut eth1s_by_count = BTreeMap::new(); - let mut eth1cache = eth1cache(); - let mut last_period_deposits = initial_deposits_imported; - for period in 0..eth1_voting_periods { - let period_deposits = initial_deposits_imported - + initial_deposit_queue - + period * new_deposits_per_voting_period; - let period_eth1_data = random_eth1_data(period_deposits); - eth1s_by_count.insert(period_eth1_data.deposit_count, period_eth1_data.clone()); - - for epoch_mod_period in 0..EPOCHS_PER_ETH1_VOTING_PERIOD { - let epoch = period * EPOCHS_PER_ETH1_VOTING_PERIOD + epoch_mod_period; - let checkpoint = random_checkpoint(epoch); - let deposits_imported = cmp::min( - period_deposits, - last_period_deposits + deposits_imported_per_epoch * epoch_mod_period, - ); - eth1cache.insert( - checkpoint, - Eth1FinalizationData { - eth1_data: period_eth1_data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - epoch_data.insert(epoch, (checkpoint, deposits_imported)); - - if epoch >= 4 { - let finalized_epoch = epoch - 4; - let (finalized_checkpoint, finalized_deposits) = epoch_data - .get(&finalized_epoch) - .expect("should get epoch data"); - - let pending_eth1s = eth1s_by_count.range((finalized_deposits + 1)..).count(); - let last_finalized_eth1 = eth1s_by_count - .range(0..(finalized_deposits + 1)) - .map(|(_, eth1)| eth1) - .next_back() - .cloned(); - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - last_finalized_eth1, - "finalized checkpoint mismatch", - ); - assert_eq!( - eth1cache.pending_eth1().len(), - pending_eth1s, - "pending eth1 mismatch" - ); - } - } - - // remove 
unneeded stuff from old epochs - while epoch_data.len() > DEFAULT_ETH1_CACHE_SIZE { - let oldest_stored_epoch = epoch_data - .keys() - .next() - .cloned() - .expect("should get oldest epoch"); - epoch_data.remove(&oldest_stored_epoch); - } - last_period_deposits = period_deposits; - } - } -} diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index d09b74e645..94ebfb4655 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -11,6 +11,7 @@ pub struct ServerSentEventHandler { single_attestation_tx: Sender>, block_tx: Sender>, blob_sidecar_tx: Sender>, + data_column_sidecar_tx: Sender>, finalized_tx: Sender>, head_tx: Sender>, exit_tx: Sender>, @@ -37,6 +38,7 @@ impl ServerSentEventHandler { let (single_attestation_tx, _) = broadcast::channel(capacity); let (block_tx, _) = broadcast::channel(capacity); let (blob_sidecar_tx, _) = broadcast::channel(capacity); + let (data_column_sidecar_tx, _) = broadcast::channel(capacity); let (finalized_tx, _) = broadcast::channel(capacity); let (head_tx, _) = broadcast::channel(capacity); let (exit_tx, _) = broadcast::channel(capacity); @@ -57,6 +59,7 @@ impl ServerSentEventHandler { single_attestation_tx, block_tx, blob_sidecar_tx, + data_column_sidecar_tx, finalized_tx, head_tx, exit_tx, @@ -99,6 +102,10 @@ impl ServerSentEventHandler { .blob_sidecar_tx .send(kind) .map(|count| log_count("blob sidecar", count)), + EventKind::DataColumnSidecar(_) => self + .data_column_sidecar_tx + .send(kind) + .map(|count| log_count("data_column_sidecar", count)), EventKind::FinalizedCheckpoint(_) => self .finalized_tx .send(kind) @@ -177,6 +184,10 @@ impl ServerSentEventHandler { self.blob_sidecar_tx.subscribe() } + pub fn subscribe_data_column_sidecar(&self) -> Receiver> { + self.data_column_sidecar_tx.subscribe() + } + pub fn subscribe_finalized(&self) -> Receiver> { self.finalized_tx.subscribe() } @@ -249,6 +260,10 @@ impl ServerSentEventHandler { 
self.blob_sidecar_tx.receiver_count() > 0 } + pub fn has_data_column_sidecar_subscribers(&self) -> bool { + self.data_column_sidecar_tx.receiver_count() > 0 + } + pub fn has_finalized_subscribers(&self) -> bool { self.finalized_tx.receiver_count() > 0 } diff --git a/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs b/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs new file mode 100644 index 0000000000..4a7a5aeea2 --- /dev/null +++ b/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs @@ -0,0 +1,122 @@ +use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use crate::data_column_verification::KzgVerifiedDataColumn; +use crate::fetch_blobs::{EngineGetBlobsOutput, FetchEngineBlobError}; +use crate::observed_block_producers::ProposalKey; +use crate::observed_data_sidecars::DoNotObserve; +use crate::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes}; +use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2}; +use kzg::{Error as KzgError, Kzg}; +#[cfg(test)] +use mockall::automock; +use std::collections::HashSet; +use std::sync::Arc; +use task_executor::TaskExecutor; +use types::{BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, Hash256, Slot}; + +/// An adapter to the `BeaconChain` functionalities to remove `BeaconChain` from direct dependency to enable testing fetch blobs logic. 
+pub(crate) struct FetchBlobsBeaconAdapter { + chain: Arc>, + spec: Arc, +} + +#[cfg_attr(test, automock, allow(dead_code))] +impl FetchBlobsBeaconAdapter { + pub(crate) fn new(chain: Arc>) -> Self { + let spec = chain.spec.clone(); + Self { chain, spec } + } + + pub(crate) fn spec(&self) -> &Arc { + &self.spec + } + + pub(crate) fn kzg(&self) -> &Arc { + &self.chain.kzg + } + + pub(crate) fn executor(&self) -> &TaskExecutor { + &self.chain.task_executor + } + + pub(crate) async fn get_blobs_v1( + &self, + versioned_hashes: Vec, + ) -> Result>>, FetchEngineBlobError> { + let execution_layer = self + .chain + .execution_layer + .as_ref() + .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; + + execution_layer + .get_blobs_v1(versioned_hashes) + .await + .map_err(FetchEngineBlobError::RequestFailed) + } + + pub(crate) async fn get_blobs_v2( + &self, + versioned_hashes: Vec, + ) -> Result>>, FetchEngineBlobError> { + let execution_layer = self + .chain + .execution_layer + .as_ref() + .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; + + execution_layer + .get_blobs_v2(versioned_hashes) + .await + .map_err(FetchEngineBlobError::RequestFailed) + } + + pub(crate) fn verify_blob_for_gossip( + &self, + blob: &Arc>, + ) -> Result, GossipBlobError> { + GossipVerifiedBlob::::new(blob.clone(), blob.index, &self.chain) + } + + pub(crate) fn verify_data_columns_kzg( + &self, + data_columns: Vec>>, + ) -> Result>, KzgError> { + KzgVerifiedDataColumn::from_batch(data_columns, &self.chain.kzg) + } + + pub(crate) fn known_for_proposal( + &self, + proposal_key: ProposalKey, + ) -> Option> { + self.chain + .observed_column_sidecars + .read() + .known_for_proposal(&proposal_key) + .cloned() + } + + pub(crate) fn cached_data_column_indexes(&self, block_root: &Hash256) -> Option> { + self.chain + .data_availability_checker + .cached_data_column_indexes(block_root) + } + + pub(crate) async fn process_engine_blobs( + &self, + slot: Slot, + block_root: Hash256, + blobs: 
EngineGetBlobsOutput, + ) -> Result { + self.chain + .process_engine_blobs(slot, block_root, blobs) + .await + .map_err(FetchEngineBlobError::BlobProcessingError) + } + + pub(crate) fn fork_choice_contains_block(&self, block_root: &Hash256) -> bool { + self.chain + .canonical_head + .fork_choice_read_lock() + .contains_block(block_root) + } +} diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs similarity index 57% rename from beacon_node/beacon_chain/src/fetch_blobs.rs rename to beacon_node/beacon_chain/src/fetch_blobs/mod.rs index d91f103b9d..e02405ddba 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs @@ -8,8 +8,17 @@ //! broadcasting blobs requires a much higher bandwidth, and is only done by high capacity //! supernodes. +mod fetch_blobs_beacon_adapter; +#[cfg(test)] +mod tests; + use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use crate::block_verification_types::AsBlock; +use crate::data_column_verification::KzgVerifiedCustodyDataColumn; +#[cfg_attr(test, double)] +use crate::fetch_blobs::fetch_blobs_beacon_adapter::FetchBlobsBeaconAdapter; use crate::kzg_utils::blobs_to_data_column_sidecars; +use crate::observed_block_producers::ProposalKey; use crate::observed_data_sidecars::DoNotObserve; use crate::{ metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, @@ -18,32 +27,28 @@ use crate::{ use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2}; use execution_layer::Error as ExecutionLayerError; use metrics::{inc_counter, TryExt}; +#[cfg(test)] +use mockall_double::double; use ssz_types::FixedVector; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::collections::HashSet; use std::sync::Arc; -use tracing::debug; +use tracing::{debug, warn}; use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; use 
types::data_column_sidecar::DataColumnSidecarError; use types::{ - BeaconStateError, Blob, BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecarList, EthSpec, - FullPayload, Hash256, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, VersionedHash, + BeaconStateError, Blob, BlobSidecar, ChainSpec, ColumnIndex, EthSpec, FullPayload, Hash256, + KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, VersionedHash, }; -/// Blobs or data column to be published to the gossip network. -pub enum BlobsOrDataColumns { +/// Result from engine get blobs to be passed onto `DataAvailabilityChecker` and published to the +/// gossip network. The blobs / data columns have not been marked as observed yet, as they may not +/// be published immediately. +#[derive(Debug)] +pub enum EngineGetBlobsOutput { Blobs(Vec>), - DataColumns(DataColumnSidecarList), -} - -/// Result from engine get blobs to be passed onto `DataAvailabilityChecker`. -/// -/// The blobs are retrieved from a trusted EL and columns are computed locally, therefore they are -/// considered valid without requiring extra validation. -pub enum EngineGetBlobsOutput { - Blobs(FixedBlobSidecarList), /// A filtered list of custody data columns to be imported into the `DataAvailabilityChecker`. - CustodyColumns(DataColumnSidecarList), + CustodyColumns(Vec>), } #[derive(Debug)] @@ -56,8 +61,10 @@ pub enum FetchEngineBlobError { ExecutionLayerMissing, InternalError(String), GossipBlob(GossipBlobError), + KzgError(kzg::Error), RequestFailed(ExecutionLayerError), RuntimeShutdown, + TokioJoin(tokio::task::JoinError), } /// Fetches blobs from the EL mempool and processes them. 
It also broadcasts unseen blobs or @@ -67,7 +74,26 @@ pub async fn fetch_and_process_engine_blobs( block_root: Hash256, block: Arc>>, custody_columns: HashSet, - publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, + publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, +) -> Result, FetchEngineBlobError> { + fetch_and_process_engine_blobs_inner( + FetchBlobsBeaconAdapter::new(chain), + block_root, + block, + custody_columns, + publish_fn, + ) + .await +} + +/// Internal implementation of fetch blobs, which uses `FetchBlobsBeaconAdapter` instead of +/// `BeaconChain` for better testability. +async fn fetch_and_process_engine_blobs_inner( + chain_adapter: FetchBlobsBeaconAdapter, + block_root: Hash256, + block: Arc>>, + custody_columns: HashSet, + publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, ) -> Result, FetchEngineBlobError> { let versioned_hashes = if let Some(kzg_commitments) = block .message() @@ -90,9 +116,12 @@ pub async fn fetch_and_process_engine_blobs( "Fetching blobs from the EL" ); - if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + if chain_adapter + .spec() + .is_peer_das_enabled_for_epoch(block.epoch()) + { fetch_and_process_blobs_v2( - chain, + chain_adapter, block_root, block, versioned_hashes, @@ -101,32 +130,33 @@ pub async fn fetch_and_process_engine_blobs( ) .await } else { - fetch_and_process_blobs_v1(chain, block_root, block, versioned_hashes, publish_fn).await + fetch_and_process_blobs_v1( + chain_adapter, + block_root, + block, + versioned_hashes, + publish_fn, + ) + .await } } async fn fetch_and_process_blobs_v1( - chain: Arc>, + chain_adapter: FetchBlobsBeaconAdapter, block_root: Hash256, block: Arc>, versioned_hashes: Vec, - publish_fn: impl Fn(BlobsOrDataColumns) + Send + Sized, + publish_fn: impl Fn(EngineGetBlobsOutput) + Send + Sized, ) -> Result, FetchEngineBlobError> { let num_expected_blobs = versioned_hashes.len(); - let execution_layer = chain - .execution_layer - .as_ref() - 
.ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; - metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64); debug!(num_expected_blobs, "Fetching blobs from the EL"); - let response = execution_layer + let response = chain_adapter .get_blobs_v1(versioned_hashes) .await .inspect_err(|_| { inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); - }) - .map_err(FetchEngineBlobError::RequestFailed)?; + })?; let num_fetched_blobs = response.iter().filter(|opt| opt.is_some()).count(); metrics::observe(&metrics::BLOBS_FROM_EL_RECEIVED, num_fetched_blobs as f64); @@ -136,9 +166,22 @@ async fn fetch_and_process_blobs_v1( inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); return Ok(None); } else { + debug!( + num_expected_blobs, + num_fetched_blobs, "Received blobs from the EL" + ); inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); } + if chain_adapter.fork_choice_contains_block(&block_root) { + // Avoid computing sidecars if the block has already been imported. + debug!( + info = "block has already been imported", + "Ignoring EL blobs response" + ); + return Ok(None); + } + let (signed_block_header, kzg_commitments_proof) = block .signed_block_header_and_kzg_commitments_proof() .map_err(FetchEngineBlobError::BeaconStateError)?; @@ -148,7 +191,7 @@ async fn fetch_and_process_blobs_v1( response, signed_block_header, &kzg_commitments_proof, - &chain.spec, + chain_adapter.spec(), )?; // Gossip verify blobs before publishing. This prevents blobs with invalid KZG proofs from @@ -157,10 +200,10 @@ async fn fetch_and_process_blobs_v1( // and be accepted (and propagated) while we are waiting to publish. Just before publishing // we will observe the blobs/columns and only proceed with publishing if they are not yet seen. 
let blobs_to_import_and_publish = fixed_blob_sidecar_list - .iter() + .into_iter() .filter_map(|opt_blob| { let blob = opt_blob.as_ref()?; - match GossipVerifiedBlob::::new(blob.clone(), blob.index, &chain) { + match chain_adapter.verify_blob_for_gossip(blob) { Ok(verified) => Some(Ok(verified)), // Ignore already seen blobs. Err(GossipBlobError::RepeatBlob { .. }) => None, @@ -170,79 +213,78 @@ async fn fetch_and_process_blobs_v1( .collect::, _>>() .map_err(FetchEngineBlobError::GossipBlob)?; - if !blobs_to_import_and_publish.is_empty() { - publish_fn(BlobsOrDataColumns::Blobs(blobs_to_import_and_publish)); + if blobs_to_import_and_publish.is_empty() { + return Ok(None); } - debug!(num_fetched_blobs, "Processing engine blobs"); + publish_fn(EngineGetBlobsOutput::Blobs( + blobs_to_import_and_publish.clone(), + )); - let availability_processing_status = chain + let availability_processing_status = chain_adapter .process_engine_blobs( block.slot(), block_root, - EngineGetBlobsOutput::Blobs(fixed_blob_sidecar_list.clone()), + EngineGetBlobsOutput::Blobs(blobs_to_import_and_publish), ) - .await - .map_err(FetchEngineBlobError::BlobProcessingError)?; + .await?; Ok(Some(availability_processing_status)) } async fn fetch_and_process_blobs_v2( - chain: Arc>, + chain_adapter: FetchBlobsBeaconAdapter, block_root: Hash256, block: Arc>, versioned_hashes: Vec, custody_columns_indices: HashSet, - publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, + publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, ) -> Result, FetchEngineBlobError> { let num_expected_blobs = versioned_hashes.len(); - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64); debug!(num_expected_blobs, "Fetching blobs from the EL"); - let response = execution_layer + let response = chain_adapter .get_blobs_v2(versioned_hashes) .await .inspect_err(|_| { 
inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); - }) - .map_err(FetchEngineBlobError::RequestFailed)?; + })?; - let (blobs, proofs): (Vec<_>, Vec<_>) = response + let Some(blobs_and_proofs) = response else { + debug!(num_expected_blobs, "No blobs fetched from the EL"); + inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); + return Ok(None); + }; + + let (blobs, proofs): (Vec<_>, Vec<_>) = blobs_and_proofs .into_iter() - .filter_map(|blob_and_proof_opt| { - blob_and_proof_opt.map(|blob_and_proof| { - let BlobAndProofV2 { blob, proofs } = blob_and_proof; - (blob, proofs) - }) + .map(|blob_and_proof| { + let BlobAndProofV2 { blob, proofs } = blob_and_proof; + (blob, proofs) }) .unzip(); let num_fetched_blobs = blobs.len(); metrics::observe(&metrics::BLOBS_FROM_EL_RECEIVED, num_fetched_blobs as f64); - // Partial blobs response isn't useful for PeerDAS, so we don't bother building and publishing data columns. if num_fetched_blobs != num_expected_blobs { - debug!( - info = "Unable to compute data columns", - num_fetched_blobs, num_expected_blobs, "Not all blobs fetched from the EL" + // This scenario is not supposed to happen if the EL is spec compliant. + // It should either return all requested blobs or none, but NOT partial responses. + // If we attempt to compute columns with partial blobs, we'd end up with invalid columns. + warn!( + num_fetched_blobs, + num_expected_blobs, "The EL did not return all requested blobs" ); inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); return Ok(None); - } else { - inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); } - if chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&block_root) - { - // Avoid computing columns if block has already been imported. + debug!(num_fetched_blobs, "All expected blobs received from the EL"); + inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); + + if chain_adapter.fork_choice_contains_block(&block_root) { + // Avoid computing columns if the block has already been imported. 
debug!( info = "block has already been imported", "Ignoring EL blobs response" @@ -250,41 +292,54 @@ async fn fetch_and_process_blobs_v2( return Ok(None); } - let custody_columns = compute_and_publish_data_columns( - &chain, + let chain_adapter = Arc::new(chain_adapter); + let custody_columns_to_import = compute_custody_columns_to_import( + &chain_adapter, + block_root, block.clone(), blobs, proofs, custody_columns_indices, - publish_fn, ) .await?; - debug!(num_fetched_blobs, "Processing engine blobs"); + if custody_columns_to_import.is_empty() { + debug!( + info = "No new data columns to import", + "Ignoring EL blobs response" + ); + return Ok(None); + } - let availability_processing_status = chain + publish_fn(EngineGetBlobsOutput::CustodyColumns( + custody_columns_to_import.clone(), + )); + + let availability_processing_status = chain_adapter .process_engine_blobs( block.slot(), block_root, - EngineGetBlobsOutput::CustodyColumns(custody_columns), + EngineGetBlobsOutput::CustodyColumns(custody_columns_to_import), ) - .await - .map_err(FetchEngineBlobError::BlobProcessingError)?; + .await?; Ok(Some(availability_processing_status)) } /// Offload the data column computation to a blocking task to avoid holding up the async runtime. 
-async fn compute_and_publish_data_columns( - chain: &Arc>, +async fn compute_custody_columns_to_import( + chain_adapter: &Arc>, + block_root: Hash256, block: Arc>>, blobs: Vec>, proofs: Vec>, custody_columns_indices: HashSet, - publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, -) -> Result, FetchEngineBlobError> { - let chain_cloned = chain.clone(); - chain +) -> Result>, FetchEngineBlobError> { + let kzg = chain_adapter.kzg().clone(); + let spec = chain_adapter.spec().clone(); + let chain_adapter_cloned = chain_adapter.clone(); + chain_adapter + .executor() .spawn_blocking_handle( move || { let mut timer = metrics::start_timer_vec( @@ -294,34 +349,61 @@ async fn compute_and_publish_data_columns( let blob_refs = blobs.iter().collect::>(); let cell_proofs = proofs.into_iter().flatten().collect(); - let data_columns_result = blobs_to_data_column_sidecars( - &blob_refs, - cell_proofs, - &block, - &chain_cloned.kzg, - &chain_cloned.spec, - ) - .discard_timer_on_break(&mut timer); + let data_columns_result = + blobs_to_data_column_sidecars(&blob_refs, cell_proofs, &block, &kzg, &spec) + .discard_timer_on_break(&mut timer); drop(timer); // This filtering ensures we only import and publish the custody columns. // `DataAvailabilityChecker` requires a strict match on custody columns count to // consider a block available. - let custody_columns = data_columns_result + let mut custody_columns = data_columns_result .map(|mut data_columns| { data_columns.retain(|col| custody_columns_indices.contains(&col.index)); data_columns }) .map_err(FetchEngineBlobError::DataColumnSidecarError)?; - publish_fn(BlobsOrDataColumns::DataColumns(custody_columns.clone())); - Ok(custody_columns) + // Only consider columns that are not already observed on gossip. 
+ if let Some(observed_columns) = chain_adapter_cloned.known_for_proposal( + ProposalKey::new(block.message().proposer_index(), block.slot()), + ) { + custody_columns.retain(|col| !observed_columns.contains(&col.index)); + if custody_columns.is_empty() { + return Ok(vec![]); + } + } + + // Only consider columns that are not already known to data availability. + if let Some(known_columns) = + chain_adapter_cloned.cached_data_column_indexes(&block_root) + { + custody_columns.retain(|col| !known_columns.contains(&col.index)); + if custody_columns.is_empty() { + return Ok(vec![]); + } + } + + // KZG verify data columns before publishing. This prevents blobs with invalid + // KZG proofs from the EL making it into the data availability checker. We do not + // immediately add these blobs to the observed blobs/columns cache because we want + // to allow blobs/columns to arrive on gossip and be accepted (and propagated) while + // we are waiting to publish. Just before publishing we will observe the blobs/columns + // and only proceed with publishing if they are not yet seen. + let verified = chain_adapter_cloned + .verify_data_columns_kzg(custody_columns) + .map_err(FetchEngineBlobError::KzgError)?; + + Ok(verified + .into_iter() + .map(KzgVerifiedCustodyDataColumn::from_asserted_custody) + .collect()) }, - "compute_and_publish_data_columns", + "compute_custody_columns_to_import", ) + .ok_or(FetchEngineBlobError::RuntimeShutdown)? .await - .map_err(|e| FetchEngineBlobError::BeaconChainError(Box::new(e))) - .and_then(|r| r) + .map_err(FetchEngineBlobError::TokioJoin)? 
} fn build_blob_sidecars( diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs new file mode 100644 index 0000000000..3178020c75 --- /dev/null +++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs @@ -0,0 +1,614 @@ +use crate::data_column_verification::KzgVerifiedDataColumn; +use crate::fetch_blobs::fetch_blobs_beacon_adapter::MockFetchBlobsBeaconAdapter; +use crate::fetch_blobs::{ + fetch_and_process_engine_blobs_inner, EngineGetBlobsOutput, FetchEngineBlobError, +}; +use crate::test_utils::{get_kzg, EphemeralHarnessType}; +use crate::AvailabilityProcessingStatus; +use bls::Signature; +use eth2::types::BlobsBundle; +use execution_layer::json_structures::{BlobAndProof, BlobAndProofV1, BlobAndProofV2}; +use execution_layer::test_utils::generate_blobs; +use maplit::hashset; +use std::sync::{Arc, Mutex}; +use task_executor::test_utils::TestRuntime; +use types::{ + BeaconBlock, BeaconBlockFulu, EmptyBlock, EthSpec, ForkName, Hash256, MainnetEthSpec, + SignedBeaconBlock, SignedBeaconBlockFulu, +}; + +type E = MainnetEthSpec; +type T = EphemeralHarnessType; + +mod get_blobs_v2 { + use super::*; + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_no_blobs_in_block() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, _s) = mock_publish_fn(); + let block = SignedBeaconBlock::::Fulu(SignedBeaconBlockFulu { + message: BeaconBlockFulu::empty(mock_adapter.spec()), + signature: Signature::empty(), + }); + let block_root = block.canonical_root(); + + // Expectations: engine fetch blobs should not be triggered + mock_adapter.expect_get_blobs_v2().times(0); + mock_adapter.expect_process_engine_blobs().times(0); + + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + Arc::new(block), + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs 
should succeed"); + + assert_eq!(processing_status, None); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_no_blobs_returned() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, _) = mock_publish_fn(); + let (block, _blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // No blobs in EL response + mock_get_blobs_v2_response(&mut mock_adapter, None); + + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + assert_eq!(processing_status, None); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_partial_blobs_returned() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, mut blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // Missing blob in EL response + blobs_and_proofs.pop(); + mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); + // No blobs should be processed + mock_adapter.expect_process_engine_blobs().times(0); + + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no columns should be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_block_imported_after_el_response() { + let mut mock_adapter = 
mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // All blobs returned, but fork choice already imported the block + mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); + mock_fork_choice_contains_block(&mut mock_adapter, vec![block.canonical_root()]); + // No blobs should be processed + mock_adapter.expect_process_engine_blobs().times(0); + + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no columns should be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_no_new_columns_to_import() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // **GIVEN**: + // All blobs returned + mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); + // block not yet imported into fork choice + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + // All data columns already seen on gossip + mock_adapter + .expect_known_for_proposal() + .returning(|_| Some(hashset![0, 1, 2])); + // No blobs should be processed + mock_adapter.expect_process_engine_blobs().times(0); + + // **WHEN**: Trigger `fetch_blobs` on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + 
publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // **THEN**: Should NOT be processed and no columns should be published. + assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no columns should be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_success() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // All blobs returned, fork choice doesn't contain block + mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + mock_adapter.expect_known_for_proposal().returning(|_| None); + mock_adapter + .expect_cached_data_column_indexes() + .returning(|_| None); + mock_adapter + .expect_verify_data_columns_kzg() + .returning(|c| { + Ok(c.into_iter() + .map(KzgVerifiedDataColumn::__new_for_testing) + .collect()) + }); + mock_process_engine_blobs_result( + &mut mock_adapter, + Ok(AvailabilityProcessingStatus::Imported(block_root)), + ); + + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + assert_eq!( + processing_status, + Some(AvailabilityProcessingStatus::Imported(block_root)) + ); + + let published_columns = extract_published_blobs(publish_fn_args); + assert!( + matches!( + published_columns, + EngineGetBlobsOutput::CustodyColumns(columns) if columns.len() == custody_columns.len() + ), + "should publish custody columns" + ); + } + + fn mock_get_blobs_v2_response( + mock_adapter: &mut MockFetchBlobsBeaconAdapter, + blobs_and_proofs_opt: 
Option>>, + ) { + let blobs_and_proofs_v2_opt = blobs_and_proofs_opt.map(|blobs_and_proofs| { + blobs_and_proofs + .into_iter() + .map(|blob_and_proof| match blob_and_proof { + BlobAndProof::V2(inner) => inner, + _ => panic!("BlobAndProofV2 not expected"), + }) + .collect() + }); + mock_adapter + .expect_get_blobs_v2() + .return_once(move |_| Ok(blobs_and_proofs_v2_opt)); + } +} + +mod get_blobs_v1 { + use super::*; + use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; + use crate::block_verification_types::AsBlock; + + const ELECTRA_FORK: ForkName = ForkName::Electra; + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_no_blobs_in_block() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let spec = mock_adapter.spec(); + let (publish_fn, _s) = mock_publish_fn(); + let block_no_blobs = + SignedBeaconBlock::from_block(BeaconBlock::empty(spec), Signature::empty()); + let block_root = block_no_blobs.canonical_root(); + + // Expectations: engine fetch blobs should not be triggered + mock_adapter.expect_get_blobs_v1().times(0); + + // WHEN: Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + Arc::new(block_no_blobs), + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // THEN: No blob is processed + assert_eq!(processing_status, None); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_no_blobs_returned() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, _) = mock_publish_fn(); + let (block, _blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // GIVEN: No blobs in EL response + let expected_blob_count = block.message().body().blob_kzg_commitments().unwrap().len(); + mock_get_blobs_v1_response(&mut 
mock_adapter, vec![None; expected_blob_count]); + + // WHEN: Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // THEN: No blob is processed + assert_eq!(processing_status, None); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_partial_blobs_returned() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let blob_count = 2; + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, blob_count); + let block_slot = block.slot(); + let block_root = block.canonical_root(); + + // GIVEN: Missing a blob in EL response (remove 1 blob from response) + let mut blob_and_proof_opts = blobs_and_proofs.into_iter().map(Some).collect::>(); + blob_and_proof_opts.first_mut().unwrap().take(); + mock_get_blobs_v1_response(&mut mock_adapter, blob_and_proof_opts); + // AND block is not imported into fork choice + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + // AND all blobs returned are valid + mock_adapter + .expect_verify_blob_for_gossip() + .returning(|b| Ok(GossipVerifiedBlob::__assumed_valid(b.clone()))); + // Returned blobs should be processed + mock_process_engine_blobs_result( + &mut mock_adapter, + Ok(AvailabilityProcessingStatus::MissingComponents( + block_slot, block_root, + )), + ); + + // WHEN: Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // THEN: Returned blobs are processed and published + assert_eq!( + processing_status, + Some(AvailabilityProcessingStatus::MissingComponents( + 
block_slot, block_root, + )) + ); + assert!( + matches!( + extract_published_blobs(publish_fn_args), + EngineGetBlobsOutput::Blobs(blobs) if blobs.len() == blob_count - 1 + ), + "partial blob results should still be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_block_imported_after_el_response() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // GIVEN: All blobs returned, but fork choice already imported the block + let blob_and_proof_opts = blobs_and_proofs.into_iter().map(Some).collect::>(); + mock_get_blobs_v1_response(&mut mock_adapter, blob_and_proof_opts); + mock_fork_choice_contains_block(&mut mock_adapter, vec![block.canonical_root()]); + + // WHEN: Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // THEN: Returned blobs should NOT be processed or published. 
+ assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no blobs should be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_no_new_blobs_to_import() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // **GIVEN**: + // All blobs returned + let blob_and_proof_opts = blobs_and_proofs.into_iter().map(Some).collect::>(); + mock_get_blobs_v1_response(&mut mock_adapter, blob_and_proof_opts); + // block not yet imported into fork choice + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + // All blobs already seen on gossip + mock_adapter.expect_verify_blob_for_gossip().returning(|b| { + Err(GossipBlobError::RepeatBlob { + proposer: b.block_proposer_index(), + slot: b.slot(), + index: b.index, + }) + }); + + // **WHEN**: Trigger `fetch_blobs` on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // **THEN**: Should NOT be processed and no blobs should be published. 
+ assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no blobs should be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_success() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let blob_count = 2; + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, blob_count); + let block_root = block.canonical_root(); + + // All blobs returned, fork choice doesn't contain block + let blob_and_proof_opts = blobs_and_proofs.into_iter().map(Some).collect::>(); + mock_get_blobs_v1_response(&mut mock_adapter, blob_and_proof_opts); + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + mock_adapter + .expect_verify_blob_for_gossip() + .returning(|b| Ok(GossipVerifiedBlob::__assumed_valid(b.clone()))); + mock_process_engine_blobs_result( + &mut mock_adapter, + Ok(AvailabilityProcessingStatus::Imported(block_root)), + ); + + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // THEN all fetched blobs are processed and published + assert_eq!( + processing_status, + Some(AvailabilityProcessingStatus::Imported(block_root)) + ); + + let published_blobs = extract_published_blobs(publish_fn_args); + assert!( + matches!( + published_blobs, + EngineGetBlobsOutput::Blobs(blobs) if blobs.len() == blob_count + ), + "should publish fetched blobs" + ); + } + + fn mock_get_blobs_v1_response( + mock_adapter: &mut MockFetchBlobsBeaconAdapter, + blobs_and_proofs_opt: Vec>>, + ) { + let blobs_and_proofs_v1 = blobs_and_proofs_opt + .into_iter() + .map(|blob_and_proof_opt| { + blob_and_proof_opt.map(|blob_and_proof| match blob_and_proof { + BlobAndProof::V1(inner) => 
inner, + _ => panic!("BlobAndProofV1 not expected"), + }) + }) + .collect(); + mock_adapter + .expect_get_blobs_v1() + .return_once(move |_| Ok(blobs_and_proofs_v1)); + } +} + +/// Extract the `EngineGetBlobsOutput` passed to the `publish_fn`. +fn extract_published_blobs( + publish_fn_args: Arc>>>, +) -> EngineGetBlobsOutput { + let mut calls = publish_fn_args.lock().unwrap(); + assert_eq!(calls.len(), 1); + calls.pop().unwrap() +} + +fn mock_process_engine_blobs_result( + mock_adapter: &mut MockFetchBlobsBeaconAdapter, + result: Result, +) { + mock_adapter + .expect_process_engine_blobs() + .return_once(move |_, _, _| result); +} + +fn mock_fork_choice_contains_block( + mock_adapter: &mut MockFetchBlobsBeaconAdapter, + block_roots: Vec, +) { + mock_adapter + .expect_fork_choice_contains_block() + .returning(move |block_root| block_roots.contains(block_root)); +} + +fn create_test_block_and_blobs( + mock_adapter: &MockFetchBlobsBeaconAdapter, + blob_count: usize, +) -> (Arc>, Vec>) { + let mut block = + SignedBeaconBlock::from_block(BeaconBlock::empty(mock_adapter.spec()), Signature::empty()); + let fork = block.fork_name_unchecked(); + let (blobs_bundle, _tx) = generate_blobs::(blob_count, fork).unwrap(); + let BlobsBundle { + commitments, + proofs, + blobs, + } = blobs_bundle; + + *block + .message_mut() + .body_mut() + .blob_kzg_commitments_mut() + .unwrap() = commitments; + + let blobs_and_proofs = if fork.fulu_enabled() { + let proofs_len = proofs.len() / blobs.len(); + blobs + .into_iter() + .zip(proofs.chunks(proofs_len)) + .map(|(blob, proofs)| { + BlobAndProof::V2(BlobAndProofV2 { + blob, + proofs: proofs.to_vec().into(), + }) + }) + .collect() + } else { + blobs + .into_iter() + .zip(proofs) + .map(|(blob, proof)| BlobAndProof::V1(BlobAndProofV1 { blob, proof })) + .collect() + }; + + (Arc::new(block), blobs_and_proofs) +} + +#[allow(clippy::type_complexity)] +fn mock_publish_fn() -> ( + impl Fn(EngineGetBlobsOutput) + Send + 'static, + Arc>>>, +) { + // 
Keep track of the arguments captured by `publish_fn`. + let captured_args = Arc::new(Mutex::new(vec![])); + let captured_args_clone = captured_args.clone(); + let publish_fn = move |args| { + let mut lock = captured_args_clone.lock().unwrap(); + lock.push(args); + }; + (publish_fn, captured_args) +} + +fn mock_beacon_adapter(fork_name: ForkName) -> MockFetchBlobsBeaconAdapter { + let test_runtime = TestRuntime::default(); + let spec = Arc::new(fork_name.make_genesis_spec(E::default_spec())); + let kzg = get_kzg(&spec); + + let mut mock_adapter = MockFetchBlobsBeaconAdapter::default(); + mock_adapter.expect_spec().return_const(spec.clone()); + mock_adapter.expect_kzg().return_const(kzg.clone()); + mock_adapter + .expect_executor() + .return_const(test_runtime.task_executor.clone()); + mock_adapter +} diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 348e6d52a6..57e1939316 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -151,6 +151,7 @@ impl BeaconChain { // Store block roots, including at all skip slots in the freezer DB. 
for slot in (block.slot().as_u64()..prev_block_slot.as_u64()).rev() { + debug!(%slot, ?block_root, "Storing frozen block to root mapping"); cold_batch.push(KeyValueStoreOp::PutKeyValue( DBColumn::BeaconBlockRoots, slot.to_be_bytes().to_vec(), diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 704fb3663f..4ac00a6e9b 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -25,7 +25,7 @@ fn ssz_blob_to_crypto_blob_boxed(blob: &Blob) -> Result(cell: &Cell) -> Result { +fn ssz_cell_to_crypto_cell(cell: &Cell) -> Result, KzgError> { let cell_bytes: &[u8] = cell.as_ref(); Ok(cell_bytes .try_into() @@ -187,9 +187,9 @@ pub fn blobs_to_data_column_sidecars( .collect::>(); // NOTE: assumes blob sidecars are ordered by index - let blob_cells_and_proofs_vec = blobs + let zipped: Vec<_> = blobs.iter().zip(proof_chunks).collect(); + let blob_cells_and_proofs_vec = zipped .into_par_iter() - .zip(proof_chunks.into_par_iter()) .map(|(blob, proofs)| { let blob = blob .as_ref() diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 5b79312d37..df253bf72c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -24,8 +24,6 @@ pub mod deneb_readiness; mod early_attester_cache; pub mod electra_readiness; mod errors; -pub mod eth1_chain; -mod eth1_finalization_cache; pub mod events; pub mod execution_payload; pub mod fetch_blobs; @@ -47,7 +45,8 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; -mod persisted_beacon_chain; +pub mod persisted_beacon_chain; +pub mod persisted_custody; mod persisted_fork_choice; mod pre_finalization_cache; pub mod proposer_prep_service; @@ -59,6 +58,7 @@ pub mod summaries_dag; pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; +pub mod validator_custody; pub mod 
validator_monitor; pub mod validator_pubkey_cache; @@ -84,7 +84,6 @@ pub use block_verification::{ pub use block_verification_types::AvailabilityPendingExecutedBlock; pub use block_verification_types::ExecutedBlock; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; -pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; pub use execution_layer::EngineState; pub use execution_payload::NotifyExecutionLayer; @@ -100,3 +99,4 @@ pub use state_processing::per_block_processing::errors::{ }; pub use store; pub use types; +pub use validator_custody::CustodyContext; diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index b7b6d1df18..3099c451c0 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -374,7 +374,7 @@ impl LightClientServerCache { let Some(current_sync_committee_branch) = store.get_sync_committee_branch(block_root)? else { return Err(BeaconChainError::LightClientBootstrapError(format!( - "Sync committee branch for block root {:?} not found", + "Sync committee branch for block root {:?} not found. This typically occurs when the block is not a finalized checkpoint. 
Light client bootstrap is only supported for finalized checkpoint block roots.", block_root ))); }; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 57012161ec..5ca764821f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -260,7 +260,7 @@ pub static UNAGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES: LazyLock> LazyLock::new(|| { try_create_histogram( "beacon_attestation_processing_state_skip_seconds", - "Time spent on reading the state during attestation processing", + "Time spent on skipping the state during attestation processing", ) }); pub static ATTESTATION_PROCESSING_SIGNATURE_SETUP_TIMES: LazyLock> = @@ -607,12 +607,6 @@ pub static PERSIST_OP_POOL: LazyLock> = LazyLock::new(|| { "Time taken to persist the operations pool", ) }); -pub static PERSIST_ETH1_CACHE: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_persist_eth1_cache", - "Time taken to persist the eth1 caches", - ) -}); pub static PERSIST_FORK_CHOICE: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_persist_fork_choice", @@ -1340,13 +1334,14 @@ pub static BEACON_BLOCK_DELAY_OBSERVED_SLOT_START: LazyLock> = ) }); -pub static BEACON_BLOB_DELAY_ALL_OBSERVED_SLOT_START: LazyLock> = - LazyLock::new(|| { +pub static BEACON_BLOB_DELAY_ALL_OBSERVED_SLOT_START: LazyLock> = LazyLock::new( + || { try_create_int_gauge( "beacon_blob_delay_all_observed_slot_start", - "Duration between the start of the block's slot and the time the block was observed.", + "Duration between the start of the block's slot and the time when all blobs have been observed.", ) - }); + }, +); pub static BEACON_BLOCK_DELAY_CONSENSUS_VERIFICATION_TIME: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 03c468a35e..09534fc4cc 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -1,5 
+1,5 @@ use crate::errors::BeaconChainError; -use crate::summaries_dag::{DAGStateSummaryV22, Error as SummariesDagError, StateSummariesDAG}; +use crate::summaries_dag::{DAGStateSummary, Error as SummariesDagError, StateSummariesDAG}; use parking_lot::Mutex; use std::collections::HashSet; use std::mem; @@ -7,7 +7,7 @@ use std::sync::{mpsc, Arc}; use std::thread; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::{migrate_database, HotColdDBError}; -use store::{Error, ItemStore, StoreOp}; +use store::{Error, ItemStore, Split, StoreOp}; pub use store::{HotColdDB, MemoryStore}; use tracing::{debug, error, info, warn}; use types::{BeaconState, BeaconStateHash, Checkpoint, Epoch, EthSpec, Hash256, Slot}; @@ -343,18 +343,23 @@ impl, Cold: ItemStore> BackgroundMigrator {} + Ok(split_change) => { + // Migration run, return the split before the migration + split_change.previous + } Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { debug!( slot = slot.as_u64(), "Database migration postponed, unaligned finalized block" ); + // Migration did not run, return the current split info + db.get_split_info() } Err(e) => { warn!(error = ?e, "Database migration failed"); @@ -367,6 +372,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator, new_finalized_checkpoint: Checkpoint, + split_prior_to_migration: Split, ) -> Result { let new_finalized_slot = new_finalized_checkpoint .epoch @@ -519,6 +526,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator, BeaconChainError>>()?; - - // De-duplicate block roots to reduce block reads below - let summary_block_roots = HashSet::::from_iter( - state_summaries - .iter() - .map(|(_, summary)| summary.latest_block_root), - ); + .map(|(state_root, summary)| (state_root, summary.into())) + .collect::>(); // Sanity check, there is at least one summary with the new finalized block root - if 
!summary_block_roots.contains(&new_finalized_checkpoint.root) { + if !state_summaries + .iter() + .any(|(_, s)| s.latest_block_root == new_finalized_checkpoint.root) + { return Err(BeaconChainError::PruningError( PruningError::MissingSummaryForFinalizedCheckpoint( new_finalized_checkpoint.root, @@ -562,16 +551,31 @@ impl, Cold: ItemStore> BackgroundMigrator 1 { + let state_summaries_dag_roots_post_split = state_summaries_dag_roots + .iter() + .filter(|(_, s)| s.slot >= split_prior_to_migration.slot) + .collect::>(); + + // Because of the additional HDiffs kept for the grid prior to finalization the tree_roots + // function will consider them roots. Those are expected. We just want to assert that the + // relevant tree of states (post-split) is well-formed. + // + // This warning could also fire if we have imported a block that doesn't descend from the + // new finalized state, and has had its ancestor state summaries pruned by a previous + // run. See: https://github.com/sigp/lighthouse/issues/7270. + if state_summaries_dag_roots_post_split.len() > 1 { warn!( - state_summaries_dag_roots = ?state_summaries_dag_roots, + location = "pruning", + new_finalized_state_root = ?new_finalized_state_root, + split_prior_to_migration_slot = %split_prior_to_migration.slot, + state_summaries_dag_roots_post_split = ?state_summaries_dag_roots_post_split, error = "summaries dag found more than one root", "Notify the devs your hot DB has some inconsistency. Pruning will fix it but devs want to know about it", ); @@ -626,10 +630,17 @@ impl, Cold: ItemStore> BackgroundMigrator = HashSet::new(); let mut states_to_prune: HashSet<(Slot, Hash256)> = HashSet::new(); + let mut kept_summaries_for_hdiff = vec![]; // Consider the following block tree where we finalize block `[0]` at the checkpoint `(f)`. 
// There's a block `[3]` that descendends from the finalized block but NOT from the @@ -650,6 +661,30 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator ObservedDataSidecars { Ok(is_known) } + pub fn known_for_proposal(&self, proposal_key: &ProposalKey) -> Option<&HashSet> { + self.items.get(proposal_key) + } + fn sanitize_data_sidecar(&self, data_sidecar: &T) -> Result<(), Error> { if data_sidecar.index() >= T::max_num_of_items(&self.spec, data_sidecar.slot()) as u64 { return Err(Error::InvalidDataIndex(data_sidecar.index())); @@ -161,6 +165,7 @@ pub trait ObservationStrategy { /// Type for messages that are observed immediately. pub struct Observe; /// Type for messages that have not been observed. +#[derive(Debug)] pub struct DoNotObserve; impl ObservationStrategy for Observe { diff --git a/beacon_node/beacon_chain/src/persisted_custody.rs b/beacon_node/beacon_chain/src/persisted_custody.rs new file mode 100644 index 0000000000..6ede473b36 --- /dev/null +++ b/beacon_node/beacon_chain/src/persisted_custody.rs @@ -0,0 +1,46 @@ +use crate::validator_custody::CustodyContextSsz; +use ssz::{Decode, Encode}; +use std::sync::Arc; +use store::{DBColumn, Error as StoreError, HotColdDB, ItemStore, StoreItem}; +use types::{EthSpec, Hash256}; + +/// 32-byte key for accessing the `CustodyContext`. All zero because `CustodyContext` has its own column. +pub const CUSTODY_DB_KEY: Hash256 = Hash256::ZERO; + +pub struct PersistedCustody(CustodyContextSsz); + +pub fn load_custody_context, Cold: ItemStore>( + store: Arc>, +) -> Option { + let res: Result, _> = + store.get_item::(&CUSTODY_DB_KEY); + // Load context from the store + match res { + Ok(Some(c)) => Some(c.0), + _ => None, + } +} + +/// Attempt to persist the custody context object to `self.store`. 
+pub fn persist_custody_context, Cold: ItemStore>( + store: Arc>, + custody_context: CustodyContextSsz, +) -> Result<(), store::Error> { + store.put_item(&CUSTODY_DB_KEY, &PersistedCustody(custody_context)) +} + +impl StoreItem for PersistedCustody { + fn db_column() -> DBColumn { + DBColumn::CustodyContext + } + + fn as_store_bytes(&self) -> Vec { + self.0.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + let custody_context = CustodyContextSsz::from_ssz_bytes(bytes)?; + + Ok(PersistedCustody(custody_context)) + } +} diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 49aa116f6c..0abb48494a 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,20 +1,17 @@ //! Utilities for managing database schema changes. -mod migration_schema_v20; -mod migration_schema_v21; -mod migration_schema_v22; mod migration_schema_v23; +mod migration_schema_v24; +mod migration_schema_v25; use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; use store::hot_cold_store::{HotColdDB, HotColdDBError}; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::Error as StoreError; -use types::Hash256; /// Migrate the database from one schema version to another, applying all requisite mutations. pub fn migrate_schema( db: Arc>, - genesis_state_root: Option, from: SchemaVersion, to: SchemaVersion, ) -> Result<(), StoreError> { @@ -24,40 +21,19 @@ pub fn migrate_schema( // Upgrade across multiple versions by recursively migrating one step at a time. (_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), genesis_state_root, from, next)?; - migrate_schema::(db, genesis_state_root, next, to) + migrate_schema::(db.clone(), from, next)?; + migrate_schema::(db, next, to) } // Downgrade across multiple versions by recursively migrating one step at a time. 
(_, _) if to.as_u64() + 1 < from.as_u64() => { let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::(db.clone(), genesis_state_root, from, next)?; - migrate_schema::(db, genesis_state_root, next, to) + migrate_schema::(db.clone(), from, next)?; + migrate_schema::(db, next, to) } // - // Migrations from before SchemaVersion(19) are deprecated. + // Migrations from before SchemaVersion(22) are deprecated. // - (SchemaVersion(19), SchemaVersion(20)) => { - let ops = migration_schema_v20::upgrade_to_v20::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(20), SchemaVersion(19)) => { - let ops = migration_schema_v20::downgrade_from_v20::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(20), SchemaVersion(21)) => { - let ops = migration_schema_v21::upgrade_to_v21::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(21), SchemaVersion(20)) => { - let ops = migration_schema_v21::downgrade_from_v21::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(21), SchemaVersion(22)) => { - // This migration needs to sync data between hot and cold DBs. 
The schema version is - // bumped inside the upgrade_to_v22 fn - migration_schema_v22::upgrade_to_v22::(db.clone(), genesis_state_root) - } (SchemaVersion(22), SchemaVersion(23)) => { let ops = migration_schema_v23::upgrade_to_v23::(db.clone())?; db.store_schema_version_atomically(to, ops) @@ -66,6 +42,22 @@ pub fn migrate_schema( let ops = migration_schema_v23::downgrade_from_v23::(db.clone())?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(23), SchemaVersion(24)) => { + let ops = migration_schema_v24::upgrade_to_v24::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(24), SchemaVersion(23)) => { + let ops = migration_schema_v24::downgrade_from_v24::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(24), SchemaVersion(25)) => { + let ops = migration_schema_v25::upgrade_to_v25()?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(25), SchemaVersion(24)) => { + let ops = migration_schema_v25::downgrade_from_v25()?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs deleted file mode 100644 index 13fde349f5..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs +++ /dev/null @@ -1,111 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; -use operation_pool::{ - PersistedOperationPool, PersistedOperationPoolV15, PersistedOperationPoolV20, -}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use tracing::{debug, info}; -use types::Attestation; - -pub fn upgrade_to_v20( - db: Arc>, -) -> Result, Error> { - info!("Upgrading from v19 to v20"); - - // Load a V15 op pool and transform it to V20. 
- let Some(PersistedOperationPoolV15:: { - attestations_v15, - sync_contributions, - attester_slashings_v15, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - }) = db.get_item(&OP_POOL_DB_KEY)? - else { - debug!("Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - let attestations = attestations_v15 - .into_iter() - .map(|(attestation, indices)| (Attestation::Base(attestation).into(), indices)) - .collect(); - - let attester_slashings = attester_slashings_v15 - .into_iter() - .map(|slashing| slashing.into()) - .collect(); - - let v20 = PersistedOperationPool::V20(PersistedOperationPoolV20 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - }); - Ok(vec![v20.as_kv_store_op(OP_POOL_DB_KEY)]) -} - -pub fn downgrade_from_v20( - db: Arc>, -) -> Result, Error> { - info!("Downgrading from v20 to v19"); - - // Load a V20 op pool and transform it to V15. - let Some(PersistedOperationPoolV20:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - }) = db.get_item(&OP_POOL_DB_KEY)? 
- else { - debug!("Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - let attestations_v15 = attestations - .into_iter() - .filter_map(|(attestation, indices)| { - if let Attestation::Base(attestation) = attestation.into() { - Some((attestation, indices)) - } else { - info!( - reason = "not a base attestation", - "Dropping attestation during downgrade" - ); - None - } - }) - .collect(); - - let attester_slashings_v15 = attester_slashings - .into_iter() - .filter_map(|slashing| match slashing.try_into() { - Ok(slashing) => Some(slashing), - Err(_) => { - info!( - reason = "not a base attester slashing", - "Dropping attester slashing during downgrade" - ); - None - } - }) - .collect(); - - let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { - attestations_v15, - sync_contributions, - attester_slashings_v15, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - }); - Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs deleted file mode 100644 index d73660cf3c..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs +++ /dev/null @@ -1,74 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use crate::validator_pubkey_cache::DatabasePubkey; -use ssz::{Decode, Encode}; -use std::sync::Arc; -use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; -use tracing::info; -use types::{Hash256, PublicKey}; - -const LOG_EVERY: usize = 200_000; - -pub fn upgrade_to_v21( - db: Arc>, -) -> Result, Error> { - info!("Upgrading from v20 to v21"); - - let mut ops = vec![]; - - // Iterate through all pubkeys and decompress them. 
- for (i, res) in db - .hot_db - .iter_column::(DBColumn::PubkeyCache) - .enumerate() - { - let (key, value) = res?; - let pubkey = PublicKey::from_ssz_bytes(&value)?; - let decompressed = DatabasePubkey::from_pubkey(&pubkey); - ops.push(decompressed.as_kv_store_op(key)); - - if i > 0 && i % LOG_EVERY == 0 { - info!( - keys_decompressed = i, - "Public key decompression in progress" - ); - } - } - info!("Public key decompression complete"); - - Ok(ops) -} - -pub fn downgrade_from_v21( - db: Arc>, -) -> Result, Error> { - info!("Downgrading from v21 to v20"); - - let mut ops = vec![]; - - // Iterate through all pubkeys and recompress them. - for (i, res) in db - .hot_db - .iter_column::(DBColumn::PubkeyCache) - .enumerate() - { - let (key, value) = res?; - let decompressed = DatabasePubkey::from_ssz_bytes(&value)?; - let (_, pubkey_bytes) = decompressed.as_pubkey().map_err(|e| Error::DBError { - message: format!("{e:?}"), - })?; - - ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::PubkeyCache, - key.as_slice().to_vec(), - pubkey_bytes.as_ssz_bytes(), - )); - - if i > 0 && i % LOG_EVERY == 0 { - info!(keys_compressed = i, "Public key compression in progress"); - } - } - - info!("Public key compression complete"); - - Ok(ops) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs deleted file mode 100644 index a995f9d6b4..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs +++ /dev/null @@ -1,196 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use std::sync::Arc; -use store::chunked_iter::ChunkedVectorIter; -use store::{ - chunked_vector::BlockRootsChunked, - metadata::{ - SchemaVersion, ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_UNINITIALIZED, STATE_UPPER_LIMIT_NO_RETAIN, - }, - partial_beacon_state::PartialBeaconState, - AnchorInfo, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, -}; -use tracing::info; -use types::{BeaconState, 
Hash256, Slot}; - -const LOG_EVERY: usize = 200_000; - -fn load_old_schema_frozen_state( - db: &HotColdDB, - state_root: Hash256, -) -> Result>, Error> { - let Some(partial_state_bytes) = db - .cold_db - .get_bytes(DBColumn::BeaconState, state_root.as_slice())? - else { - return Ok(None); - }; - let mut partial_state: PartialBeaconState = - PartialBeaconState::from_ssz_bytes(&partial_state_bytes, db.get_chain_spec())?; - - // Fill in the fields of the partial state. - partial_state.load_block_roots(&db.cold_db, db.get_chain_spec())?; - partial_state.load_state_roots(&db.cold_db, db.get_chain_spec())?; - partial_state.load_historical_roots(&db.cold_db, db.get_chain_spec())?; - partial_state.load_randao_mixes(&db.cold_db, db.get_chain_spec())?; - partial_state.load_historical_summaries(&db.cold_db, db.get_chain_spec())?; - - partial_state.try_into().map(Some) -} - -pub fn upgrade_to_v22( - db: Arc>, - genesis_state_root: Option, -) -> Result<(), Error> { - info!("Upgrading DB schema from v21 to v22"); - - let old_anchor = db.get_anchor_info(); - - // If the anchor was uninitialized in the old schema (`None`), this represents a full archive - // node. - let effective_anchor = if old_anchor == ANCHOR_UNINITIALIZED { - ANCHOR_FOR_ARCHIVE_NODE - } else { - old_anchor.clone() - }; - - let split_slot = db.get_split_slot(); - let genesis_state_root = genesis_state_root.ok_or(Error::GenesisStateUnknown)?; - - let mut cold_ops = vec![]; - - // Load the genesis state in the previous chunked format, BEFORE we go deleting or rewriting - // anything. - let mut genesis_state = load_old_schema_frozen_state::(&db, genesis_state_root)? - .ok_or(Error::MissingGenesisState)?; - let genesis_state_root = genesis_state.update_tree_hash_cache()?; - let genesis_block_root = genesis_state.get_latest_block_root(genesis_state_root); - - // Store the genesis state in the new format, prior to updating the schema version on disk. 
- // In case of a crash no data is lost because we will re-load it in the old format and re-do - // this write. - if split_slot > 0 { - info!( - state_root = ?genesis_state_root, - "Re-storing genesis state" - ); - db.store_cold_state(&genesis_state_root, &genesis_state, &mut cold_ops)?; - } - - // Write the block roots in the new format in a new column. Similar to above, we do this - // separately from deleting the old format block roots so that this is crash safe. - let oldest_block_slot = effective_anchor.oldest_block_slot; - write_new_schema_block_roots::( - &db, - genesis_block_root, - oldest_block_slot, - split_slot, - &mut cold_ops, - )?; - - // Commit this first batch of non-destructive cold database ops. - db.cold_db.do_atomically(cold_ops)?; - - // Now we update the anchor and the schema version atomically in the hot database. - // - // If we crash after commiting this change, then there will be some leftover cruft left in the - // freezer database, but no corruption because all the new-format data has already been written - // above. - let new_anchor = AnchorInfo { - state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, - state_lower_limit: Slot::new(0), - ..effective_anchor.clone() - }; - let hot_ops = vec![db.compare_and_set_anchor_info(old_anchor, new_anchor)?]; - db.store_schema_version_atomically(SchemaVersion(22), hot_ops)?; - - // Finally, clean up the old-format data from the freezer database. - delete_old_schema_freezer_data::(&db)?; - - Ok(()) -} - -pub fn delete_old_schema_freezer_data( - db: &Arc>, -) -> Result<(), Error> { - let mut cold_ops = vec![]; - - let columns = [ - DBColumn::BeaconState, - // Cold state summaries indexed by state root were stored in this column. - DBColumn::BeaconStateSummary, - // Mapping from restore point number to state root was stored in this column. - DBColumn::BeaconRestorePoint, - // Chunked vector values were stored in these columns. 
- DBColumn::BeaconHistoricalRoots, - DBColumn::BeaconRandaoMixes, - DBColumn::BeaconHistoricalSummaries, - DBColumn::BeaconBlockRootsChunked, - DBColumn::BeaconStateRootsChunked, - ]; - - for column in columns { - for res in db.cold_db.iter_column_keys::>(column) { - let key = res?; - cold_ops.push(KeyValueStoreOp::DeleteKey(column, key)); - } - } - let delete_ops = cold_ops.len(); - - info!(delete_ops, "Deleting historic states"); - db.cold_db.do_atomically(cold_ops)?; - - // In order to reclaim space, we need to compact the freezer DB as well. - db.compact_freezer()?; - - Ok(()) -} - -pub fn write_new_schema_block_roots( - db: &HotColdDB, - genesis_block_root: Hash256, - oldest_block_slot: Slot, - split_slot: Slot, - cold_ops: &mut Vec, -) -> Result<(), Error> { - info!( - %oldest_block_slot, - ?genesis_block_root, - "Starting beacon block root migration" - ); - - // Store the genesis block root if it would otherwise not be stored. - if oldest_block_slot != 0 { - cold_ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconBlockRoots, - 0u64.to_be_bytes().to_vec(), - genesis_block_root.as_slice().to_vec(), - )); - } - - // Block roots are available from the `oldest_block_slot` to the `split_slot`. - let start_vindex = oldest_block_slot.as_usize(); - let block_root_iter = ChunkedVectorIter::::new( - db, - start_vindex, - split_slot, - db.get_chain_spec(), - ); - - // OK to hold these in memory (10M slots * 43 bytes per KV ~= 430 MB). 
- for (i, (slot, block_root)) in block_root_iter.enumerate() { - cold_ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconBlockRoots, - slot.to_be_bytes().to_vec(), - block_root.as_slice().to_vec(), - )); - - if i > 0 && i % LOG_EVERY == 0 { - info!( - roots_migrated = i, - "Beacon block root migration in progress" - ); - } - } - - Ok(()) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs index d0f8202679..d70f41bb7e 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs @@ -43,21 +43,26 @@ pub fn upgrade_to_v23( let state_root = state_root_result?; debug!( ?state_root, - "Deleting temporary state flag on v23 schema migration" + "Deleting temporary state on v23 schema migration" ); ops.push(KeyValueStoreOp::DeleteKey( DBColumn::BeaconStateTemporary, state_root.as_slice().to_vec(), )); - // Here we SHOULD delete the items for key `state_root` in columns `BeaconState` and - // `BeaconStateSummary`. However, in the event we have dangling temporary states at the time - // of the migration, the first pruning routine will prune them. They will be a tree branch / - // root not part of the finalized tree and trigger a warning log once. - // - // We believe there may be race conditions concerning temporary flags where a necessary - // canonical state is marked as temporary. In current stable, a restart with that DB will - // corrupt the DB. In the unlikely case this happens we choose to leave the states and - // allow pruning to clean them. + + // We also delete the temporary states themselves. Although there are known issue with + // temporary states and this could lead to DB corruption, we will only corrupt the DB in + // cases where the DB would be corrupted by restarting on v7.0.x. We consider these DBs + // "too far gone". 
Deleting here has the advantage of not generating warnings about + // disjoint state DAGs in the v24 upgrade, or the first pruning after migration. + ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconState, + state_root.as_slice().to_vec(), + )); + ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateSummary, + state_root.as_slice().to_vec(), + )); } Ok(ops) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs new file mode 100644 index 0000000000..6901c99cee --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs @@ -0,0 +1,605 @@ +use crate::{ + beacon_chain::BeaconChainTypes, + summaries_dag::{DAGStateSummary, DAGStateSummaryV22, StateSummariesDAG}, +}; +use ssz::{Decode, DecodeError, Encode}; +use ssz_derive::Encode; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; +use store::{ + hdiff::StorageStrategy, + hot_cold_store::{HotStateSummaryV22, OptionalDiffBaseState}, + DBColumn, Error, HotColdDB, HotStateSummary, KeyValueStore, KeyValueStoreOp, StoreItem, +}; +use tracing::{debug, info, warn}; +use types::{ + BeaconState, ChainSpec, Checkpoint, CommitteeCache, EthSpec, Hash256, Slot, CACHED_EPOCHS, +}; + +/// We stopped using the pruning checkpoint in schema v23 but never explicitly deleted it. +/// +/// We delete it as part of the v24 migration. +pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); + +pub fn store_full_state_v22( + state_root: &Hash256, + state: &BeaconState, + ops: &mut Vec, +) -> Result<(), Error> { + let bytes = StorageContainer::new(state).as_ssz_bytes(); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconState, + state_root.as_slice().to_vec(), + bytes, + )); + Ok(()) +} + +/// Fetch a V22 state from the database either as a full state or using block replay. 
+pub fn get_state_v22( + db: &Arc>, + state_root: &Hash256, + spec: &ChainSpec, +) -> Result>, Error> { + let Some(summary) = db.get_item::(state_root)? else { + return Ok(None); + }; + let Some(base_state) = + get_full_state_v22(&db.hot_db, &summary.epoch_boundary_state_root, spec)? + else { + return Ok(None); + }; + // Loading hot states via block replay doesn't care about the schema version, so we can use + // the DB's current method for this. + let update_cache = false; + db.load_hot_state_using_replay( + base_state, + summary.slot, + summary.latest_block_root, + update_cache, + ) + .map(Some) +} + +pub fn get_full_state_v22, E: EthSpec>( + db: &KV, + state_root: &Hash256, + spec: &ChainSpec, +) -> Result>, Error> { + match db.get_bytes(DBColumn::BeaconState, state_root.as_slice())? { + Some(bytes) => { + let container = StorageContainer::from_ssz_bytes(&bytes, spec)?; + Ok(Some(container.try_into()?)) + } + None => Ok(None), + } +} + +/// A container for storing `BeaconState` components. +/// +/// DEPRECATED. +#[derive(Encode)] +pub struct StorageContainer { + state: BeaconState, + committee_caches: Vec>, +} + +impl StorageContainer { + /// Create a new instance for storing a `BeaconState`. + pub fn new(state: &BeaconState) -> Self { + Self { + state: state.clone(), + committee_caches: state.committee_caches().to_vec(), + } + } + + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + // We need to use the slot-switching `from_ssz_bytes` of `BeaconState`, which doesn't + // compose with the other SSZ utils, so we duplicate some parts of `ssz_derive` here. 
+ let mut builder = ssz::SszDecoderBuilder::new(bytes); + + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + + let mut decoder = builder.build()?; + + let state = decoder.decode_next_with(|bytes| BeaconState::from_ssz_bytes(bytes, spec))?; + let committee_caches = decoder.decode_next()?; + + Ok(Self { + state, + committee_caches, + }) + } +} + +impl TryInto> for StorageContainer { + type Error = Error; + + fn try_into(mut self) -> Result, Error> { + let mut state = self.state; + + for i in (0..CACHED_EPOCHS).rev() { + if i >= self.committee_caches.len() { + return Err(Error::SszDecodeError(DecodeError::BytesInvalid( + "Insufficient committees for BeaconState".to_string(), + ))); + }; + + state.committee_caches_mut()[i] = self.committee_caches.remove(i); + } + + Ok(state) + } +} + +/// The checkpoint used for pruning the database. +/// +/// Updated whenever pruning is successful. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct PruningCheckpoint { + pub checkpoint: Checkpoint, +} + +impl StoreItem for PruningCheckpoint { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec { + self.checkpoint.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(PruningCheckpoint { + checkpoint: Checkpoint::from_ssz_bytes(bytes)?, + }) + } +} + +pub fn upgrade_to_v24( + db: Arc>, +) -> Result, Error> { + let mut migrate_ops = vec![]; + let split = db.get_split_info(); + let hot_hdiff_start_slot = split.slot; + + // Delete the `PruningCheckpoint` (no longer used). 
+ migrate_ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconMeta, + PRUNING_CHECKPOINT_KEY.as_slice().to_vec(), + )); + + // Sanity check to make sure the HDiff grid is aligned with the epoch start + if hot_hdiff_start_slot % T::EthSpec::slots_per_epoch() != 0 { + return Err(Error::MigrationError(format!( + "hot_hdiff_start_slot is not first slot in epoch {hot_hdiff_start_slot}" + ))); + } + + // After V24 hot tree states, the in-memory `anchor_info.anchor_slot` is the start slot of the + // hot HDiff grid. Before the migration, it's set to the slot of the anchor state in the DB: + // - the genesis state on a genesis sync, or + // - the checkpoint state on a checkpoint sync. + // + // If the node has been running for a while the `anchor_slot` might be less than the finalized + // checkpoint. This upgrade constructs a grid only with unfinalized states, rooted in the + // current finalized state. So we set the `anchor_slot` to `split.slot` to root the grid in the + // current finalized state. Each migration sets the split to + // ``` + // Split { slot: finalized_state.slot(), state_root: finalized_state_root } + // ``` + { + let anchor_info = db.get_anchor_info(); + + // If the node is already an archive node, we can set the anchor slot to 0 and copy + // snapshots and diffs from the freezer DB to the hot DB in order to establish an initial + // hot grid that is aligned/"perfect" (no `start_slot`/`anchor_slot` to worry about). + // + // This only works if all of the following are true: + // + // - We have the previous snapshot for the split state stored in the freezer DB, i.e. + // if `previous_snapshot_slot >= state_upper_limit`. + // - The split state itself will be stored as a diff or snapshot in the new grid. We choose + // not to support a split state that requires block replay, because computing its previous + // state root from the DAG is not straight-forward. 
+ let dummy_start_slot = Slot::new(0); + let closest_layer_points = db + .hierarchy + .closest_layer_points(split.slot, dummy_start_slot); + + let previous_snapshot_slot = + closest_layer_points + .iter() + .copied() + .min() + .ok_or(Error::MigrationError( + "closest_layer_points must not be empty".to_string(), + ))?; + + if previous_snapshot_slot >= anchor_info.state_upper_limit + && db + .hierarchy + .storage_strategy(split.slot, dummy_start_slot) + .is_ok_and(|strategy| !strategy.is_replay_from()) + { + info!( + %previous_snapshot_slot, + split_slot = %split.slot, + "Aligning hot diff grid to freezer" + ); + + // Set anchor slot to 0 in case it was set to something else by a previous checkpoint + // sync. + let mut new_anchor_info = anchor_info.clone(); + new_anchor_info.anchor_slot = Slot::new(0); + + // Update the anchor on disk atomically if migration is successful + migrate_ops.push(db.compare_and_set_anchor_info(anchor_info, new_anchor_info)?); + + // Copy each of the freezer layers to the hot DB in slot ascending order. + for layer_slot in closest_layer_points.into_iter().rev() { + // Do not try to load the split state itself from the freezer, it won't be there. + // It will be migrated in the main loop below. + if layer_slot == split.slot { + continue; + } + + let mut freezer_state = db.load_cold_state_by_slot(layer_slot)?; + + let state_root = freezer_state.canonical_root()?; + + let mut state_ops = vec![]; + db.store_hot_state(&state_root, &freezer_state, &mut state_ops)?; + db.hot_db.do_atomically(state_ops)?; + } + } else { + // Otherwise for non-archive nodes, set the anchor slot for the hot grid to the current + // split slot (the oldest slot available). 
+ let mut new_anchor_info = anchor_info.clone(); + new_anchor_info.anchor_slot = hot_hdiff_start_slot; + + // Update the anchor in disk atomically if migration is successful + migrate_ops.push(db.compare_and_set_anchor_info(anchor_info, new_anchor_info)?); + } + } + + let state_summaries_dag = new_dag::(&db)?; + + // We compute the state summaries DAG outside of a DB migration. Therefore if the DB is properly + // prunned, it should have a single root equal to the split. + let state_summaries_dag_roots = state_summaries_dag.tree_roots(); + if state_summaries_dag_roots.len() == 1 { + let (root_summary_state_root, root_summary) = + state_summaries_dag_roots.first().expect("len == 1"); + if *root_summary_state_root != split.state_root { + warn!( + ?root_summary_state_root, + ?root_summary, + ?split, + "State summaries DAG root is not the split" + ); + } + } else { + warn!( + location = "migration", + state_summaries_dag_roots = ?state_summaries_dag_roots, + "State summaries DAG found more than one root" + ); + } + + // Sort summaries by slot so we have their ancestor diffs already stored when we store them. + // If the summaries are sorted topologically we can insert them into the DB like if they were a + // new state, re-using existing code. As states are likely to be sequential the diff cache + // should kick in making the migration more efficient. If we just iterate the column of + // summaries we may get distance state of each iteration. 
+ let summaries_by_slot = state_summaries_dag.summaries_by_slot_ascending(); + debug!( + summaries_count = state_summaries_dag.summaries_count(), + slots_count = summaries_by_slot.len(), + min_slot = ?summaries_by_slot.first_key_value().map(|(slot, _)| slot), + max_slot = ?summaries_by_slot.last_key_value().map(|(slot, _)| slot), + ?state_summaries_dag_roots, + %hot_hdiff_start_slot, + split_state_root = ?split.state_root, + "Starting hot states migration" + ); + + // Upgrade all hot DB state summaries to the new type: + // - Set all summaries of boundary states to `Snapshot` type + // - Set all others to `Replay` pointing to `epoch_boundary_state_root` + + let mut diffs_written = 0; + let mut summaries_written = 0; + let mut last_log_time = Instant::now(); + + for (slot, old_hot_state_summaries) in summaries_by_slot { + for (state_root, old_summary) in old_hot_state_summaries { + if slot < hot_hdiff_start_slot { + // To reach here, there must be some pruning issue with the DB where we still have + // hot states below the split slot. This states can't be migrated as we can't compute + // a storage strategy for them. After this if else block, the summary and state are + // scheduled for deletion. + debug!( + %slot, + ?state_root, + "Ignoring state summary prior to split slot" + ); + } else { + // 1. Store snapshot or diff at this slot (if required). + let storage_strategy = db.hot_storage_strategy(slot)?; + debug!( + %slot, + ?state_root, + ?storage_strategy, + "Migrating state summary" + ); + + match storage_strategy { + StorageStrategy::DiffFrom(_) | StorageStrategy::Snapshot => { + // Load the state and re-store it as a snapshot or diff. + let state = get_state_v22::(&db, &state_root, &db.spec)? + .ok_or(Error::MissingState(state_root))?; + + // Store immediately so that future diffs can load and diff from it. + let mut ops = vec![]; + // We must commit the hot state summary immediately, otherwise we can't diff + // against it and future writes will fail. 
That's why we write the new hot + // summaries in a different column to have both new and old data present at + // once. Otherwise if the process crashes during the migration the database will + // be broken. + db.store_hot_state_summary(&state_root, &state, &mut ops)?; + db.store_hot_state_diffs(&state_root, &state, &mut ops)?; + db.hot_db.do_atomically(ops)?; + diffs_written += 1; + } + StorageStrategy::ReplayFrom(diff_base_slot) => { + // Optimization: instead of having to load the state of each summary we load x32 + // less states by manually computing the HotStateSummary roots using the + // computed state dag. + // + // No need to store diffs for states that will be reconstructed by replaying + // blocks. + // + // 2. Convert the summary to the new format. + if state_root == split.state_root { + return Err(Error::MigrationError( + "unreachable: split state should be stored as a snapshot or diff" + .to_string(), + )); + } + let previous_state_root = state_summaries_dag + .previous_state_root(state_root) + .map_err(|e| { + Error::MigrationError(format!( + "error computing previous_state_root {e:?}" + )) + })?; + + let diff_base_state = OptionalDiffBaseState::new( + diff_base_slot, + state_summaries_dag + .ancestor_state_root_at_slot(state_root, diff_base_slot) + .map_err(|e| { + Error::MigrationError(format!( + "error computing ancestor_state_root_at_slot \ + ({state_root:?}, {diff_base_slot}): {e:?}" + )) + })?, + ); + + let new_summary = HotStateSummary { + slot, + latest_block_root: old_summary.latest_block_root, + latest_block_slot: old_summary.latest_block_slot, + previous_state_root, + diff_base_state, + }; + let op = new_summary.as_kv_store_op(state_root); + // It's not necessary to immediately commit the summaries of states that are + // ReplayFrom. However we do so for simplicity. + db.hot_db.do_atomically(vec![op])?; + } + } + } + + // 3. Stage old data for deletion. 
+ if slot % T::EthSpec::slots_per_epoch() == 0 { + migrate_ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconState, + state_root.as_slice().to_vec(), + )); + } + + // Delete previous summaries + migrate_ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateSummary, + state_root.as_slice().to_vec(), + )); + + summaries_written += 1; + if last_log_time.elapsed() > Duration::from_secs(5) { + last_log_time = Instant::now(); + info!( + diffs_written, + summaries_written, + summaries_count = state_summaries_dag.summaries_count(), + "Hot states migration in progress" + ); + } + } + } + + info!( + diffs_written, + summaries_written, + summaries_count = state_summaries_dag.summaries_count(), + "Hot states migration complete" + ); + + Ok(migrate_ops) +} + +pub fn downgrade_from_v24( + db: Arc>, +) -> Result, Error> { + let state_summaries = db + .load_hot_state_summaries()? + .into_iter() + .map(|(state_root, summary)| (state_root, summary.into())) + .collect::>(); + + info!( + summaries_count = state_summaries.len(), + "DB downgrade of v24 state summaries started" + ); + + let state_summaries_dag = StateSummariesDAG::new(state_summaries) + .map_err(|e| Error::MigrationError(format!("Error on new StateSumariesDAG {e:?}")))?; + + let mut migrate_ops = vec![]; + let mut states_written = 0; + let mut summaries_written = 0; + let mut summaries_skipped = 0; + let mut last_log_time = Instant::now(); + + // Rebuild the PruningCheckpoint from the split. + let split = db.get_split_info(); + let pruning_checkpoint = PruningCheckpoint { + checkpoint: Checkpoint { + epoch: split.slot.epoch(T::EthSpec::slots_per_epoch()), + root: split.block_root, + }, + }; + migrate_ops.push(pruning_checkpoint.as_kv_store_op(PRUNING_CHECKPOINT_KEY)); + + // Convert state summaries back to the old format. 
+ for (state_root, summary) in state_summaries_dag + .summaries_by_slot_ascending() + .into_iter() + .flat_map(|(_, summaries)| summaries) + { + // No need to migrate any states prior to the split. The v22 schema does not need them, and + // they would generate warnings about a disjoint DAG when re-upgrading to V24. + if summary.slot < split.slot { + debug!( + slot = %summary.slot, + ?state_root, + "Skipping migration of pre-split state" + ); + summaries_skipped += 1; + continue; + } + + // If boundary state: persist. + // Do not cache these states as they are unlikely to be relevant later. + let update_cache = false; + if summary.slot % T::EthSpec::slots_per_epoch() == 0 { + let (state, _) = db + .load_hot_state(&state_root, update_cache)? + .ok_or(Error::MissingState(state_root))?; + + // Immediately commit the state, so we don't OOM. It's stored in a different + // column so if the migration crashes we'll just store extra harmless junk in the DB. + let mut state_write_ops = vec![]; + store_full_state_v22(&state_root, &state, &mut state_write_ops)?; + db.hot_db.do_atomically(state_write_ops)?; + states_written += 1; + } + + // Persist old summary. 
+ let epoch_boundary_state_slot = summary.slot - summary.slot % T::EthSpec::slots_per_epoch(); + let old_summary = HotStateSummaryV22 { + slot: summary.slot, + latest_block_root: summary.latest_block_root, + epoch_boundary_state_root: state_summaries_dag + .ancestor_state_root_at_slot(state_root, epoch_boundary_state_slot) + .map_err(|e| { + Error::MigrationError(format!( + "error computing ancestor_state_root_at_slot({state_root:?}, {epoch_boundary_state_slot}) {e:?}" + )) + })?, + }; + migrate_ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconStateSummary, + state_root.as_slice().to_vec(), + old_summary.as_ssz_bytes(), + )); + summaries_written += 1; + + if last_log_time.elapsed() > Duration::from_secs(5) { + last_log_time = Instant::now(); + info!( + states_written, + summaries_written, + summaries_count = state_summaries_dag.summaries_count(), + "DB downgrade of v24 state summaries in progress" + ); + } + } + + // Delete all V24 schema data. We do this outside the loop over summaries to ensure we cover + // every piece of data and to simplify logic around skipping certain summaries that do not get + // migrated. 
+ for db_column in [ + DBColumn::BeaconStateHotSummary, + DBColumn::BeaconStateHotDiff, + DBColumn::BeaconStateHotSnapshot, + ] { + for key in db.hot_db.iter_column_keys::(db_column) { + let state_root = key?; + migrate_ops.push(KeyValueStoreOp::DeleteKey( + db_column, + state_root.as_slice().to_vec(), + )); + } + } + + info!( + states_written, + summaries_written, + summaries_skipped, + summaries_count = state_summaries_dag.summaries_count(), + "DB downgrade of v24 state summaries completed" + ); + + Ok(migrate_ops) +} + +fn new_dag( + db: &HotColdDB, +) -> Result { + // Collect all sumaries for unfinalized states + let state_summaries_v22 = db + .hot_db + // Collect summaries from the legacy V22 column BeaconStateSummary + .iter_column::(DBColumn::BeaconStateSummary) + .map(|res| { + let (key, value) = res?; + let state_root: Hash256 = key; + let summary = HotStateSummaryV22::from_ssz_bytes(&value)?; + let block_root = summary.latest_block_root; + // Read blocks to get the block slot and parent root. In Holesky forced finalization it + // took 5100 ms to read 15072 state summaries, so it's not really necessary to + // de-duplicate block reads. + let block = db + .get_blinded_block(&block_root)? 
+ .ok_or(Error::MissingBlock(block_root))?; + + Ok(( + state_root, + DAGStateSummaryV22 { + slot: summary.slot, + latest_block_root: summary.latest_block_root, + block_slot: block.slot(), + block_parent_root: block.parent_root(), + }, + )) + }) + .collect::, Error>>()?; + + StateSummariesDAG::new_from_v22(state_summaries_v22) + .map_err(|e| Error::MigrationError(format!("error computing states summaries dag {e:?}"))) +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs new file mode 100644 index 0000000000..44e8894d6f --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs @@ -0,0 +1,20 @@ +use store::{DBColumn, Error, KeyValueStoreOp}; +use tracing::info; +use types::Hash256; + +pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::ZERO; + +/// Delete the on-disk eth1 data. +pub fn upgrade_to_v25() -> Result, Error> { + info!("Deleting eth1 data from disk for v25 DB upgrade"); + Ok(vec![KeyValueStoreOp::DeleteKey( + DBColumn::Eth1Cache, + ETH1_CACHE_DB_KEY.as_slice().to_vec(), + )]) +} + +/// No-op: we don't need to recreate on-disk eth1 data, as previous versions gracefully handle +/// data missing from disk. 
+pub fn downgrade_from_v25() -> Result, Error> { + Ok(vec![]) +} diff --git a/beacon_node/beacon_chain/src/single_attestation.rs b/beacon_node/beacon_chain/src/single_attestation.rs index fa4f98bb07..33a093687e 100644 --- a/beacon_node/beacon_chain/src/single_attestation.rs +++ b/beacon_node/beacon_chain/src/single_attestation.rs @@ -1,9 +1,13 @@ use crate::attestation_verification::Error; -use types::{Attestation, AttestationElectra, BitList, BitVector, EthSpec, SingleAttestation}; +use types::{ + Attestation, AttestationBase, AttestationElectra, BitList, BitVector, EthSpec, ForkName, + SingleAttestation, +}; pub fn single_attestation_to_attestation( single_attestation: &SingleAttestation, committee: &[usize], + fork_name: ForkName, ) -> Result, Error> { let attester_index = single_attestation.attester_index; let committee_index = single_attestation.committee_index; @@ -24,23 +28,33 @@ pub fn single_attestation_to_attestation( slot, })?; - let mut committee_bits: BitVector = BitVector::default(); - committee_bits - .set(committee_index as usize, true) - .map_err(|e| Error::Invalid(e.into()))?; + if fork_name.electra_enabled() { + let mut committee_bits: BitVector = BitVector::default(); + committee_bits + .set(committee_index as usize, true) + .map_err(|e| Error::Invalid(e.into()))?; - let mut aggregation_bits = - BitList::with_capacity(committee.len()).map_err(|e| Error::Invalid(e.into()))?; - aggregation_bits - .set(aggregation_bit, true) - .map_err(|e| Error::Invalid(e.into()))?; - - // TODO(electra): consider eventually allowing conversion to non-Electra attestations as well - // to maintain invertability (`Attestation` -> `SingleAttestation` -> `Attestation`). 
- Ok(Attestation::Electra(AttestationElectra { - aggregation_bits, - committee_bits, - data: single_attestation.data.clone(), - signature: single_attestation.signature.clone(), - })) + let mut aggregation_bits = + BitList::with_capacity(committee.len()).map_err(|e| Error::Invalid(e.into()))?; + aggregation_bits + .set(aggregation_bit, true) + .map_err(|e| Error::Invalid(e.into()))?; + Ok(Attestation::Electra(AttestationElectra { + aggregation_bits, + committee_bits, + data: single_attestation.data.clone(), + signature: single_attestation.signature.clone(), + })) + } else { + let mut aggregation_bits = + BitList::with_capacity(committee.len()).map_err(|e| Error::Invalid(e.into()))?; + aggregation_bits + .set(aggregation_bit, true) + .map_err(|e| Error::Invalid(e.into()))?; + Ok(Attestation::Base(AttestationBase { + aggregation_bits, + data: single_attestation.data.clone(), + signature: single_attestation.signature.clone(), + })) + } } diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index f206405f67..ad7e31a8f0 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -377,7 +377,7 @@ fn advance_head(beacon_chain: &Arc>) -> Resu state.current_epoch(), head_block_root, state - .get_beacon_proposer_indices(&beacon_chain.spec) + .get_beacon_proposer_indices(state.current_epoch(), &beacon_chain.spec) .map_err(BeaconChainError::from)?, state.fork(), ) diff --git a/beacon_node/beacon_chain/src/summaries_dag.rs b/beacon_node/beacon_chain/src/summaries_dag.rs index 8dff2ac7be..42d078baeb 100644 --- a/beacon_node/beacon_chain/src/summaries_dag.rs +++ b/beacon_node/beacon_chain/src/summaries_dag.rs @@ -3,6 +3,7 @@ use std::{ cmp::Ordering, collections::{btree_map::Entry, BTreeMap, HashMap}, }; +use store::HotStateSummary; use types::{Hash256, Slot}; #[derive(Debug, Clone, Copy)] @@ -57,6 +58,12 @@ pub enum Error { root_state_root: Hash256, 
root_state_slot: Slot, }, + CircularAncestorChain { + state_root: Hash256, + previous_state_root: Hash256, + slot: Slot, + last_slot: Slot, + }, } impl StateSummariesDAG { @@ -311,10 +318,24 @@ impl StateSummariesDAG { } let mut ancestors = vec![]; + let mut last_slot = None; loop { if let Some(summary) = self.state_summaries_by_state_root.get(&state_root) { + // Detect cycles, including the case where `previous_state_root == state_root`. + if let Some(last_slot) = last_slot { + if summary.slot >= last_slot { + return Err(Error::CircularAncestorChain { + state_root, + previous_state_root: summary.previous_state_root, + slot: summary.slot, + last_slot, + }); + } + } + ancestors.push((state_root, summary.slot)); - state_root = summary.previous_state_root + last_slot = Some(summary.slot); + state_root = summary.previous_state_root; } else { return Ok(ancestors); } @@ -336,6 +357,17 @@ impl StateSummariesDAG { } } +impl From for DAGStateSummary { + fn from(value: HotStateSummary) -> Self { + Self { + slot: value.slot, + latest_block_root: value.latest_block_root, + latest_block_slot: value.latest_block_slot, + previous_state_root: value.previous_state_root, + } + } +} + #[cfg(test)] mod tests { use super::{DAGStateSummaryV22, Error, StateSummariesDAG}; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d3689f7068..db4e2fab26 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -5,7 +5,7 @@ use crate::kzg_utils::build_data_column_sidecars; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ - beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, + beacon_chain::{BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, single_attestation::single_attestation_to_attestation, sync_committee_verification::Error as 
SyncCommitteeError, @@ -14,7 +14,6 @@ pub use crate::{ }; use crate::{ builder::{BeaconChainBuilder, Witness}, - eth1_chain::CachingEth1Backend, BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler, StateSkipConfig, }; @@ -72,7 +71,7 @@ pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // Pre-computed data column sidecar using a single static blob from: // `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz` -const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] = +pub const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] = include_bytes!("test_utils/fixtures/test_data_column_sidecars.ssz"); // Default target aggregators to set during testing, this ensures an aggregator at each slot. @@ -116,7 +115,7 @@ pub fn get_kzg(spec: &ChainSpec) -> Arc { } pub type BaseHarnessType = - Witness, E, THotStore, TColdStore>; + Witness; pub type DiskHarnessType = BaseHarnessType, BeaconNodeBackend>; pub type EphemeralHarnessType = BaseHarnessType, MemoryStore>; @@ -516,11 +515,7 @@ where self } - pub fn mock_execution_layer(self) -> Self { - self.mock_execution_layer_with_config() - } - - pub fn mock_execution_layer_with_config(mut self) -> Self { + pub fn mock_execution_layer(mut self) -> Self { let mock = mock_execution_layer_from_parts::( self.spec.clone().expect("cannot build without spec"), self.runtime.task_executor.clone(), @@ -579,8 +574,6 @@ where ) .task_executor(self.runtime.task_executor.clone()) .execution_layer(self.execution_layer) - .dummy_eth1_backend() - .expect("should build dummy backend") .shutdown_sender(shutdown_tx) .chain_config(chain_config) .import_all_data_columns(self.import_all_data_columns) @@ -613,12 +606,6 @@ where let chain = builder.build().expect("should build"); - let sampling_column_count = if self.import_all_data_columns { - chain.spec.number_of_custody_groups as usize - } else { - chain.spec.custody_requirement as usize - }; - BeaconChainHarness { spec: chain.spec.clone(), chain: Arc::new(chain), @@ -629,7 
+616,6 @@ where mock_execution_layer: self.mock_execution_layer, mock_builder: None, rng: make_rng(), - sampling_column_count, } } } @@ -686,7 +672,6 @@ pub struct BeaconChainHarness { pub mock_execution_layer: Option>, pub mock_builder: Option>>, - pub sampling_column_count: usize, pub rng: Mutex, } @@ -789,7 +774,10 @@ where } pub fn get_sampling_column_count(&self) -> usize { - self.sampling_column_count + self.chain + .data_availability_checker + .custody_context() + .sampling_size(None, &self.chain.spec) as usize } pub fn slots_per_epoch(&self) -> u64 { @@ -1127,9 +1115,14 @@ where attn.aggregation_bits .set(aggregation_bit_index, true) .unwrap(); - attn + Attestation::Electra(attn) + } + Attestation::Base(mut attn) => { + attn.aggregation_bits + .set(aggregation_bit_index, true) + .unwrap(); + Attestation::Base(attn) } - Attestation::Base(_) => panic!("Must be an Electra attestation"), }; let aggregation_bits = attestation.get_aggregation_bits(); @@ -1157,8 +1150,10 @@ where let single_attestation = attestation.to_single_attestation_with_attester_index(attester_index as u64)?; + let fork_name = self.spec.fork_name_at_slot::(attestation.data().slot); let attestation: Attestation = - single_attestation_to_attestation(&single_attestation, committee.committee).unwrap(); + single_attestation_to_attestation(&single_attestation, committee.committee, fork_name) + .unwrap(); assert_eq!( single_attestation.committee_index, @@ -2364,7 +2359,7 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); if !has_blobs { - return RpcBlock::new_without_blobs(Some(block_root), block, 0); + return RpcBlock::new_without_blobs(Some(block_root), block); } // Blobs are stored as data columns from Fulu (PeerDAS) @@ -2374,14 +2369,8 @@ where .into_iter() .map(CustodyDataColumn::from_asserted_custody) .collect::>(); - RpcBlock::new_with_custody_columns( - Some(block_root), - block, - custody_columns, - self.get_sampling_column_count(), - &self.spec, - ) - .unwrap() + 
RpcBlock::new_with_custody_columns(Some(block_root), block, custody_columns, &self.spec) + .unwrap() } else { let blobs = self.chain.get_blobs(&block_root).unwrap().blobs(); RpcBlock::new(Some(block_root), block, blobs).unwrap() @@ -2407,15 +2396,9 @@ where .take(sampling_column_count) .map(CustodyDataColumn::from_asserted_custody) .collect::>(); - RpcBlock::new_with_custody_columns( - Some(block_root), - block, - columns, - sampling_column_count, - &self.spec, - )? + RpcBlock::new_with_custody_columns(Some(block_root), block, columns, &self.spec)? } else { - RpcBlock::new_without_blobs(Some(block_root), block, 0) + RpcBlock::new_without_blobs(Some(block_root), block) } } else { let blobs = blob_items @@ -2428,7 +2411,11 @@ where }) } - pub fn process_attestations(&self, attestations: HarnessAttestations) { + pub fn process_attestations( + &self, + attestations: HarnessAttestations, + state: &BeaconState, + ) { let num_validators = self.validator_keypairs.len(); let mut unaggregated = Vec::with_capacity(num_validators); // This is an over-allocation, but it should be fine. 
It won't be *that* memory hungry and @@ -2437,7 +2424,35 @@ where for (unaggregated_attestations, maybe_signed_aggregate) in attestations.iter() { for (attn, subnet) in unaggregated_attestations { - unaggregated.push((attn, Some(*subnet))); + let aggregation_bits = attn.get_aggregation_bits(); + + if aggregation_bits.len() != 1 { + panic!("Must be an unaggregated attestation") + } + + let aggregation_bit = *aggregation_bits.first().unwrap(); + + let committee = state + .get_beacon_committee(attn.data().slot, attn.committee_index().unwrap()) + .unwrap(); + + let attester_index = committee + .committee + .iter() + .enumerate() + .find_map(|(i, &index)| { + if aggregation_bit as usize == i { + return Some(index); + } + None + }) + .unwrap(); + + let single_attestation = attn + .to_single_attestation_with_attester_index(attester_index as u64) + .unwrap(); + + unaggregated.push((single_attestation, Some(*subnet))); } if let Some(a) = maybe_signed_aggregate { @@ -2447,7 +2462,9 @@ where for result in self .chain - .batch_verify_unaggregated_attestations_for_gossip(unaggregated.into_iter()) + .batch_verify_unaggregated_attestations_for_gossip( + unaggregated.iter().map(|(attn, subnet)| (attn, *subnet)), + ) .unwrap() { let verified = result.unwrap(); @@ -2514,7 +2531,7 @@ where ) { let attestations = self.make_attestations(validators, state, state_root, block_hash, block.slot()); - self.process_attestations(attestations); + self.process_attestations(attestations, state); } pub fn sync_committee_sign_block( diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs new file mode 100644 index 0000000000..1169b64537 --- /dev/null +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -0,0 +1,549 @@ +use std::{ + collections::{BTreeMap, HashMap}, + sync::atomic::{AtomicU64, Ordering}, +}; + +use parking_lot::RwLock; + +use ssz_derive::{Decode, Encode}; +use types::{ChainSpec, Epoch, EthSpec, Slot}; + +/// A delay 
before making the CGC change effective to the data availability checker. +const CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS: u64 = 30; + +/// Number of slots after which a validator's registration is removed if it has not re-registered. +const VALIDATOR_REGISTRATION_EXPIRY_SLOTS: Slot = Slot::new(256); + +type ValidatorsAndBalances = Vec<(usize, u64)>; +type SlotAndEffectiveBalance = (Slot, u64); + +/// This currently just registers increases in validator count. +/// Does not handle decreasing validator counts +#[derive(Default, Debug)] +struct ValidatorRegistrations { + /// Set of all validators that is registered to this node along with its effective balance + /// + /// Key is validator index and value is effective_balance. + validators: HashMap, + /// Maintains the validator custody requirement at a given epoch. + /// + /// Note: Only stores the epoch value when there's a change in custody requirement. + /// So if epoch 10 and 11 has the same custody requirement, only 10 is stored. + /// This map is never pruned, because currently we never decrease custody requirement, so this + /// map size is contained at 128. + epoch_validator_custody_requirements: BTreeMap, +} + +impl ValidatorRegistrations { + /// Returns the validator custody requirement at the latest epoch. + fn latest_validator_custody_requirement(&self) -> Option { + self.epoch_validator_custody_requirements + .last_key_value() + .map(|(_, v)| *v) + } + + /// Lookup the active custody requirement at the given epoch. + fn custody_requirement_at_epoch(&self, epoch: Epoch) -> Option { + self.epoch_validator_custody_requirements + .range(..=epoch) + .last() + .map(|(_, custody_count)| *custody_count) + } + + /// Register a new validator index and updates the list of validators if required. + /// Returns `Some((effective_epoch, new_cgc))` if the registration results in a CGC update. 
+ pub(crate) fn register_validators( + &mut self, + validators_and_balance: ValidatorsAndBalances, + current_slot: Slot, + spec: &ChainSpec, + ) -> Option<(Epoch, u64)> { + for (validator_index, effective_balance) in validators_and_balance { + self.validators + .insert(validator_index, (current_slot, effective_balance)); + } + + // Drop validators that haven't re-registered with the node for `VALIDATOR_REGISTRATION_EXPIRY_SLOTS`. + self.validators + .retain(|_, (slot, _)| *slot >= current_slot - VALIDATOR_REGISTRATION_EXPIRY_SLOTS); + + // Each `BALANCE_PER_ADDITIONAL_CUSTODY_GROUP` effectively contributes one unit of "weight". + let validator_custody_units = self.validators.values().map(|(_, eb)| eb).sum::() + / spec.balance_per_additional_custody_group; + let validator_custody_requirement = + get_validators_custody_requirement(validator_custody_units, spec); + + tracing::debug!( + validator_custody_units, + validator_custody_requirement, + "Registered validators" + ); + + // If registering the new validator increased the total validator "units", then + // add a new entry for the current epoch + if Some(validator_custody_requirement) > self.latest_validator_custody_requirement() { + // Apply the change from the next epoch after adding some delay buffer to ensure + // the node has enough time to subscribe to subnets etc, and to avoid having + // inconsistent column counts within an epoch. + let effective_delay_slots = + CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS / spec.seconds_per_slot; + let effective_epoch = + (current_slot + effective_delay_slots).epoch(E::slots_per_epoch()) + 1; + self.epoch_validator_custody_requirements + .entry(effective_epoch) + .and_modify(|old_custody| *old_custody = validator_custody_requirement) + .or_insert(validator_custody_requirement); + Some((effective_epoch, validator_custody_requirement)) + } else { + None + } + } +} + +/// Given the `validator_custody_units`, return the custody requirement based on +/// the spec parameters. 
+/// +/// Note: a `validator_custody_units` here represents the number of 32 eth effective_balance +/// equivalent to `BALANCE_PER_ADDITIONAL_CUSTODY_GROUP`. +/// +/// For e.g. a validator with eb 32 eth is 1 unit. +/// a validator with eb 65 eth is 65 // 32 = 2 units. +/// +/// See https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/validator.md#validator-custody +fn get_validators_custody_requirement(validator_custody_units: u64, spec: &ChainSpec) -> u64 { + std::cmp::min( + std::cmp::max(validator_custody_units, spec.validator_custody_requirement), + spec.number_of_custody_groups, + ) +} + +/// Contains all the information the node requires to calculate the +/// number of columns to be custodied when checking for DA. +#[derive(Debug)] +pub struct CustodyContext { + /// The Number of custody groups required based on the number of validators + /// that is attached to this node. + /// + /// This is the number that we use to compute the custody group count that + /// we require for data availability check, and we use to advertise to our peers in the metadata + /// and enr values. + validator_custody_count: AtomicU64, + /// Is the node run as a supernode based on current cli parameters. + pub current_is_supernode: bool, + /// The persisted value for `is_supernode` based on the previous run of this node. + /// + /// Note: We require this value because if a user restarts the node with a higher cli custody + /// count value than in the previous run, then we should continue advertising the custody + /// count based on the old value than the new one since we haven't backfilled the required + /// columns. + persisted_is_supernode: bool, + /// Maintains all the validators that this node is connected to currently + validator_registrations: RwLock, +} + +impl CustodyContext { + /// Create a new custody default custody context object when no persisted object + /// exists. + /// + /// The `is_supernode` value is based on current cli parameters. 
+ pub fn new(is_supernode: bool) -> Self { + Self { + validator_custody_count: AtomicU64::new(0), + current_is_supernode: is_supernode, + persisted_is_supernode: is_supernode, + validator_registrations: Default::default(), + } + } + + pub fn new_from_persisted_custody_context( + ssz_context: CustodyContextSsz, + is_supernode: bool, + ) -> Self { + CustodyContext { + validator_custody_count: AtomicU64::new(ssz_context.validator_custody_at_head), + current_is_supernode: is_supernode, + persisted_is_supernode: ssz_context.persisted_is_supernode, + validator_registrations: Default::default(), + } + } + + /// Register a new validator index and updates the list of validators if required. + /// + /// Also modifies the internal structures if the validator custody has changed to + /// update the `custody_column_count`. + /// + /// Returns `Some` along with the updated custody group count if it has changed, otherwise returns `None`. + pub fn register_validators( + &self, + validators_and_balance: ValidatorsAndBalances, + current_slot: Slot, + spec: &ChainSpec, + ) -> Option { + let Some((effective_epoch, new_validator_custody)) = self + .validator_registrations + .write() + .register_validators::(validators_and_balance, current_slot, spec) + else { + return None; + }; + + let current_cgc = self.custody_group_count_at_head(spec); + let validator_custody_count_at_head = self.validator_custody_count.load(Ordering::Relaxed); + + if new_validator_custody != validator_custody_count_at_head { + tracing::debug!( + old_count = validator_custody_count_at_head, + new_count = new_validator_custody, + "Validator count at head updated" + ); + self.validator_custody_count + .store(new_validator_custody, Ordering::Relaxed); + + let updated_cgc = self.custody_group_count_at_head(spec); + // Send the message to network only if there are more columns subnets to subscribe to + if updated_cgc > current_cgc { + tracing::debug!( + old_cgc = current_cgc, + updated_cgc, + "Custody group count 
updated" + ); + return Some(CustodyCountChanged { + new_custody_group_count: updated_cgc, + sampling_count: self.sampling_size(Some(effective_epoch), spec), + }); + } + } + + None + } + + /// This function is used to determine the custody group count at head ONLY. + /// Do NOT use this directly for data availability check, use `self.sampling_size` instead as + /// CGC can change over epochs. + pub fn custody_group_count_at_head(&self, spec: &ChainSpec) -> u64 { + if self.current_is_supernode { + return spec.number_of_custody_groups; + } + let validator_custody_count_at_head = self.validator_custody_count.load(Ordering::Relaxed); + + // If there are no validators, return the minimum custody_requirement + if validator_custody_count_at_head > 0 { + validator_custody_count_at_head + } else { + spec.custody_requirement + } + } + + /// Returns the count of custody columns this node must sample for a block at `epoch` to import. + /// If an `epoch` is not specified, returns the *current* validator custody requirement. + pub fn sampling_size(&self, epoch_opt: Option, spec: &ChainSpec) -> u64 { + let custody_group_count = if self.current_is_supernode { + spec.number_of_custody_groups + } else if let Some(epoch) = epoch_opt { + self.validator_registrations + .read() + .custody_requirement_at_epoch(epoch) + .unwrap_or(spec.custody_requirement) + } else { + self.custody_group_count_at_head(spec) + }; + + spec.sampling_size(custody_group_count) + .expect("should compute node sampling size from valid chain spec") + } +} + +/// The custody count changed because of a change in the +/// number of validators being managed. +pub struct CustodyCountChanged { + pub new_custody_group_count: u64, + pub sampling_count: u64, +} + +/// The custody information that gets persisted across runs. 
+#[derive(Debug, Encode, Decode, Clone)] +pub struct CustodyContextSsz { + validator_custody_at_head: u64, + persisted_is_supernode: bool, +} + +impl From<&CustodyContext> for CustodyContextSsz { + fn from(context: &CustodyContext) -> Self { + CustodyContextSsz { + validator_custody_at_head: context.validator_custody_count.load(Ordering::Relaxed), + persisted_is_supernode: context.persisted_is_supernode, + } + } +} + +#[cfg(test)] +mod tests { + use types::MainnetEthSpec; + + use super::*; + + type E = MainnetEthSpec; + + #[test] + fn no_validators_supernode_default() { + let custody_context = CustodyContext::new(true); + let spec = E::default_spec(); + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + spec.number_of_custody_groups + ); + assert_eq!( + custody_context.sampling_size(None, &spec), + spec.number_of_custody_groups + ); + } + + #[test] + fn no_validators_fullnode_default() { + let custody_context = CustodyContext::new(false); + let spec = E::default_spec(); + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + spec.custody_requirement, + "head custody count should be minimum spec custody requirement" + ); + assert_eq!( + custody_context.sampling_size(None, &spec), + spec.samples_per_slot + ); + } + + #[test] + fn register_single_validator_should_update_cgc() { + let custody_context = CustodyContext::new(false); + let spec = E::default_spec(); + let bal_per_additional_group = spec.balance_per_additional_custody_group; + let min_val_custody_requirement = spec.validator_custody_requirement; + // One single node increases its balance over 3 epochs. 
+ let validators_and_expected_cgc_change = vec![ + ( + vec![(0, bal_per_additional_group)], + Some(min_val_custody_requirement), + ), + // No CGC change at 8 custody units, as it's the minimum requirement + (vec![(0, 8 * bal_per_additional_group)], None), + (vec![(0, 10 * bal_per_additional_group)], Some(10)), + ]; + + register_validators_and_assert_cgc( + &custody_context, + validators_and_expected_cgc_change, + &spec, + ); + } + + #[test] + fn register_multiple_validators_should_update_cgc() { + let custody_context = CustodyContext::new(false); + let spec = E::default_spec(); + let bal_per_additional_group = spec.balance_per_additional_custody_group; + let min_val_custody_requirement = spec.validator_custody_requirement; + // Add 3 validators over 3 epochs. + let validators_and_expected_cgc = vec![ + ( + vec![(0, bal_per_additional_group)], + Some(min_val_custody_requirement), + ), + ( + vec![ + (0, bal_per_additional_group), + (1, 7 * bal_per_additional_group), + ], + // No CGC change at 8 custody units, as it's the minimum requirement + None, + ), + ( + vec![ + (0, bal_per_additional_group), + (1, 7 * bal_per_additional_group), + (2, 2 * bal_per_additional_group), + ], + Some(10), + ), + ]; + + register_validators_and_assert_cgc(&custody_context, validators_and_expected_cgc, &spec); + } + + #[test] + fn register_validators_should_not_update_cgc_for_supernode() { + let custody_context = CustodyContext::new(true); + let spec = E::default_spec(); + let bal_per_additional_group = spec.balance_per_additional_custody_group; + + // Add 3 validators over 3 epochs. 
+ let validators_and_expected_cgc = vec![ + (vec![(0, bal_per_additional_group)], None), + ( + vec![ + (0, bal_per_additional_group), + (1, 7 * bal_per_additional_group), + ], + None, + ), + ( + vec![ + (0, bal_per_additional_group), + (1, 7 * bal_per_additional_group), + (2, 2 * bal_per_additional_group), + ], + None, + ), + ]; + + register_validators_and_assert_cgc(&custody_context, validators_and_expected_cgc, &spec); + assert_eq!( + custody_context.sampling_size(None, &spec), + spec.number_of_custody_groups + ); + } + + #[test] + fn cgc_change_should_be_effective_to_sampling_after_delay() { + let custody_context = CustodyContext::new(false); + let spec = E::default_spec(); + let current_slot = Slot::new(10); + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + let default_sampling_size = custody_context.sampling_size(None, &spec); + let validator_custody_units = 10; + + let _cgc_changed = custody_context.register_validators::( + vec![( + 0, + validator_custody_units * spec.balance_per_additional_custody_group, + )], + current_slot, + &spec, + ); + + // CGC update is not applied for `current_epoch`. + assert_eq!( + custody_context.sampling_size(Some(current_epoch), &spec), + default_sampling_size + ); + // CGC update is applied for the next epoch. 
+ assert_eq!( + custody_context.sampling_size(Some(current_epoch + 1), &spec), + validator_custody_units + ); + } + + #[test] + fn validator_dropped_after_no_registrations_within_expiry_should_not_reduce_cgc() { + let custody_context = CustodyContext::new(false); + let spec = E::default_spec(); + let current_slot = Slot::new(10); + let val_custody_units_1 = 10; + let val_custody_units_2 = 5; + + // GIVEN val_1 and val_2 registered at `current_slot` + let _ = custody_context.register_validators::( + vec![ + ( + 1, + val_custody_units_1 * spec.balance_per_additional_custody_group, + ), + ( + 2, + val_custody_units_2 * spec.balance_per_additional_custody_group, + ), + ], + current_slot, + &spec, + ); + + // WHEN val_1 re-registered, but val_2 did not re-register after `VALIDATOR_REGISTRATION_EXPIRY_SLOTS + 1` slots + let cgc_changed_opt = custody_context.register_validators::( + vec![( + 1, + val_custody_units_1 * spec.balance_per_additional_custody_group, + )], + current_slot + VALIDATOR_REGISTRATION_EXPIRY_SLOTS + 1, + &spec, + ); + + // THEN the reduction from dropping val_2 balance should NOT result in a CGC reduction + assert!(cgc_changed_opt.is_none(), "CGC should remain unchanged"); + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + val_custody_units_1 + val_custody_units_2 + ) + } + + #[test] + fn validator_dropped_after_no_registrations_within_expiry() { + let custody_context = CustodyContext::new(false); + let spec = E::default_spec(); + let current_slot = Slot::new(10); + let val_custody_units_1 = 10; + let val_custody_units_2 = 5; + let val_custody_units_3 = 6; + + // GIVEN val_1 and val_2 registered at `current_slot` + let _ = custody_context.register_validators::( + vec![ + ( + 1, + val_custody_units_1 * spec.balance_per_additional_custody_group, + ), + ( + 2, + val_custody_units_2 * spec.balance_per_additional_custody_group, + ), + ], + current_slot, + &spec, + ); + + // WHEN val_1 and val_3 registered, but val_3 did not re-register 
after `VALIDATOR_REGISTRATION_EXPIRY_SLOTS + 1` slots + let cgc_changed = custody_context.register_validators::( + vec![ + ( + 1, + val_custody_units_1 * spec.balance_per_additional_custody_group, + ), + ( + 3, + val_custody_units_3 * spec.balance_per_additional_custody_group, + ), + ], + current_slot + VALIDATOR_REGISTRATION_EXPIRY_SLOTS + 1, + &spec, + ); + + // THEN CGC should increase, BUT val_2 balance should NOT be included in CGC + assert_eq!( + cgc_changed + .expect("CGC should change") + .new_custody_group_count, + val_custody_units_1 + val_custody_units_3 + ); + } + + /// Update validator every epoch and assert cgc against expected values. + fn register_validators_and_assert_cgc( + custody_context: &CustodyContext, + validators_and_expected_cgc_changed: Vec<(ValidatorsAndBalances, Option)>, + spec: &ChainSpec, + ) { + for (idx, (validators_and_balance, expected_cgc_change)) in + validators_and_expected_cgc_changed.into_iter().enumerate() + { + let epoch = Epoch::new(idx as u64); + let updated_custody_count_opt = custody_context + .register_validators::( + validators_and_balance, + epoch.start_slot(E::slots_per_epoch()), + spec, + ) + .map(|c| c.new_custody_group_count); + + assert_eq!(updated_custody_count_opt, expected_cgc_change); + } + } +} diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 16f4e3f143..8d3d748e8c 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -342,7 +342,7 @@ impl MonitoredValidator { // Prune while summaries.len() > HISTORIC_EPOCHS { - if let Some(key) = summaries.iter().map(|(epoch, _)| *epoch).min() { + if let Some(key) = summaries.keys().copied().min() { summaries.remove(&key); } } @@ -406,7 +406,6 @@ pub struct ValidatorMonitor { impl ValidatorMonitor { #[instrument(parent = None, - level = "info", name = "validator_monitor", skip_all )] @@ -440,7 +439,6 @@ impl ValidatorMonitor { /// 
emit metrics and logs on a per-validator basis (rather than just an /// aggregated basis). #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -451,7 +449,6 @@ impl ValidatorMonitor { /// Add some validators to `self` for additional monitoring. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -474,7 +471,6 @@ impl ValidatorMonitor { /// Add an unaggregated attestation #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -493,7 +489,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -505,7 +500,6 @@ impl ValidatorMonitor { /// Reads information from the given `state`. The `state` *must* be valid (i.e, able to be /// imported). #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -623,7 +617,6 @@ impl ValidatorMonitor { /// Add missed non-finalized blocks for the monitored validators #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -725,7 +718,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -744,7 +736,6 @@ impl ValidatorMonitor { /// Process the unaggregated attestations generated by the service `attestation_simulator_service` /// and check if the attestation qualifies for a reward matching the flags source/target/head #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -822,7 +813,6 @@ impl ValidatorMonitor { /// We allow disabling tracking metrics on an individual validator basis /// since it can result in untenable 
cardinality with high validator counts. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -836,7 +826,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1119,7 +1108,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1132,7 +1120,6 @@ impl ValidatorMonitor { /// Returns the number of validators monitored by `self`. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1143,7 +1130,6 @@ impl ValidatorMonitor { /// Return the `id`'s of all monitored validators. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1153,7 +1139,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1167,7 +1152,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1180,7 +1164,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1192,7 +1175,6 @@ impl ValidatorMonitor { /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. /// Otherwise, do nothing. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1220,7 +1202,6 @@ impl ValidatorMonitor { /// Process a block received on gossip. 
#[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1237,7 +1218,6 @@ impl ValidatorMonitor { /// Process a block received on the HTTP API from a local validator. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1253,7 +1233,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1298,7 +1277,6 @@ impl ValidatorMonitor { /// Register an attestation seen on the gossip network. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1319,7 +1297,6 @@ impl ValidatorMonitor { /// Register an attestation seen on the HTTP API. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1339,7 +1316,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1431,7 +1407,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1555,7 +1530,6 @@ impl ValidatorMonitor { /// /// Note: Blocks that get orphaned will skew the inclusion distance calculation. #[instrument(parent = None, - level = "info", name = "validator_monitor", skip_all )] @@ -1635,7 +1609,6 @@ impl ValidatorMonitor { /// Register a sync committee message received over gossip. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1656,7 +1629,6 @@ impl ValidatorMonitor { /// Register a sync committee message received over the http api. 
#[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1677,7 +1649,6 @@ impl ValidatorMonitor { /// Register a sync committee message. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1732,7 +1703,6 @@ impl ValidatorMonitor { /// Register a sync committee contribution received over gossip. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1755,7 +1725,6 @@ impl ValidatorMonitor { /// Register a sync committee contribution received over the http api. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1778,7 +1747,6 @@ impl ValidatorMonitor { /// Register a sync committee contribution. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1866,7 +1834,6 @@ impl ValidatorMonitor { /// Register that the `sync_aggregate` was included in a *valid* `BeaconBlock`. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1909,7 +1876,6 @@ impl ValidatorMonitor { /// Register an exit from the gossip network. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1920,7 +1886,6 @@ impl ValidatorMonitor { /// Register an exit from the HTTP API. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1931,7 +1896,6 @@ impl ValidatorMonitor { /// Register an exit included in a *valid* beacon block. 
#[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1941,7 +1905,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1970,7 +1933,6 @@ impl ValidatorMonitor { /// Register a proposer slashing from the gossip network. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1981,7 +1943,6 @@ impl ValidatorMonitor { /// Register a proposer slashing from the HTTP API. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -1992,7 +1953,6 @@ impl ValidatorMonitor { /// Register a proposer slashing included in a *valid* `BeaconBlock`. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -2002,7 +1962,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -2041,7 +2000,6 @@ impl ValidatorMonitor { /// Register an attester slashing from the gossip network. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -2052,7 +2010,6 @@ impl ValidatorMonitor { /// Register an attester slashing from the HTTP API. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -2063,7 +2020,6 @@ impl ValidatorMonitor { /// Register an attester slashing included in a *valid* `BeaconBlock`. 
#[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -2073,7 +2029,6 @@ impl ValidatorMonitor { } #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all @@ -2120,7 +2075,6 @@ impl ValidatorMonitor { /// /// Should be called whenever Prometheus is scraping Lighthouse. #[instrument(parent = None, - level = "info", fields(service = "validator_monitor"), name = "validator_monitor", skip_all diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 30eec539fc..11729f8d8a 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -8,24 +8,22 @@ use beacon_chain::test_utils::{MakeAttestationOptions, HARNESS_GENESIS_TIME}; use beacon_chain::{ attestation_verification::Error as AttnError, test_utils::{ - test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + single_attestation_to_attestation, test_spec, AttestationStrategy, BeaconChainHarness, + BlockStrategy, EphemeralHarnessType, }, BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped, }; use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; -use ssz_types::BitVector; -use state_processing::{ - per_block_processing::errors::AttestationValidationError, per_slot_processing, -}; +use state_processing::per_slot_processing; use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; use types::{ signed_aggregate_and_proof::SignedAggregateAndProofRefMut, test_utils::generate_deterministic_keypair, Address, AggregateSignature, Attestation, - AttestationRef, AttestationRefMut, BeaconStateError, BitList, ChainSpec, Epoch, EthSpec, - FixedBytesExtended, ForkName, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, - 
SignedAggregateAndProof, Slot, SubnetId, Unsigned, + AttestationRef, ChainSpec, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, Keypair, + MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, SingleAttestation, Slot, + SubnetId, Unsigned, }; pub type E = MainnetEthSpec; @@ -122,7 +120,7 @@ fn get_harness_capella_spec( /// Also returns some info about who created it. fn get_valid_unaggregated_attestation( chain: &BeaconChain, -) -> (Attestation, usize, usize, SecretKey, SubnetId) { +) -> (SingleAttestation, SecretKey, SubnetId) { let head = chain.head_snapshot(); let current_slot = chain.slot().expect("should get slot"); @@ -156,8 +154,15 @@ fn get_valid_unaggregated_attestation( ) .expect("should sign attestation"); - let subnet_id = SubnetId::compute_subnet_for_attestation::( - valid_attestation.to_ref(), + let single_attestation = SingleAttestation { + committee_index: valid_attestation.committee_index().unwrap(), + attester_index: validator_index as u64, + data: valid_attestation.data().clone(), + signature: valid_attestation.signature().clone(), + }; + + let subnet_id = SubnetId::compute_subnet_for_single_attestation::( + &single_attestation, head.beacon_state .get_committee_count_at_slot(current_slot) .expect("should get committee count"), @@ -165,13 +170,7 @@ fn get_valid_unaggregated_attestation( ) .expect("should get subnet_id"); - ( - valid_attestation, - validator_index, - validator_committee_index, - validator_sk, - subnet_id, - ) + (single_attestation, validator_sk, subnet_id) } fn get_valid_aggregated_attestation( @@ -275,15 +274,13 @@ struct GossipTester { /* * Valid unaggregated attestation */ - valid_attestation: Attestation, - attester_validator_index: usize, - attester_committee_index: usize, + valid_attestation: SingleAttestation, attester_sk: SecretKey, attestation_subnet_id: SubnetId, /* * Valid unaggregated attestation for batch testing */ - invalid_attestation: Attestation, + invalid_attestation: SingleAttestation, /* * 
Valid aggregate */ @@ -312,22 +309,33 @@ impl GossipTester { // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); - let ( - valid_attestation, - attester_validator_index, - attester_committee_index, - attester_sk, - attestation_subnet_id, - ) = get_valid_unaggregated_attestation(&harness.chain); + let (valid_attestation, attester_sk, attestation_subnet_id) = + get_valid_unaggregated_attestation(&harness.chain); + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let committee = state + .get_beacon_committee( + valid_attestation.data.slot, + valid_attestation.committee_index, + ) + .unwrap(); + let fork_name = harness + .chain + .spec + .fork_name_at_slot::(valid_attestation.data.slot); + let valid_aggregate_attestation = + single_attestation_to_attestation(&valid_attestation, committee.committee, fork_name) + .unwrap(); let (valid_aggregate, aggregator_validator_index, aggregator_sk) = - get_valid_aggregated_attestation(&harness.chain, valid_attestation.clone()); + get_valid_aggregated_attestation(&harness.chain, valid_aggregate_attestation.clone()); let mut invalid_attestation = valid_attestation.clone(); - invalid_attestation.data_mut().beacon_block_root = Hash256::repeat_byte(13); + invalid_attestation.data.beacon_block_root = Hash256::repeat_byte(13); let (mut invalid_aggregate, _, _) = - get_valid_aggregated_attestation(&harness.chain, invalid_attestation.clone()); + get_valid_aggregated_attestation(&harness.chain, valid_aggregate_attestation.clone()); match invalid_aggregate.to_mut() { SignedAggregateAndProofRefMut::Base(att) => { @@ -341,8 +349,6 @@ impl GossipTester { Self { harness, valid_attestation, - attester_validator_index, - attester_committee_index, attester_sk, attestation_subnet_id, invalid_attestation, @@ -467,12 +473,12 @@ impl GossipTester { pub fn inspect_unaggregate_err(self, desc: &str, get_attn: G, inspect_err: I) -> Self where - G: Fn(&Self, &mut 
Attestation, &mut SubnetId), + G: Fn(&Self, &mut SingleAttestation, &mut SubnetId, &ChainSpec), I: Fn(&Self, AttnError), { let mut attn = self.valid_attestation.clone(); let mut subnet_id = self.attestation_subnet_id; - get_attn(&self, &mut attn, &mut subnet_id); + get_attn(&self, &mut attn, &mut subnet_id, &self.harness.spec); /* * Individual verification @@ -912,32 +918,20 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation with invalid committee index", - |tester, a, _| { - match a.to_mut() { - AttestationRefMut::Base(attn) => { - attn.data.index = tester - .harness - .chain - .head_snapshot() - .beacon_state - .get_committee_count_at_slot(attn.data.slot) - .unwrap(); - } - AttestationRefMut::Electra(attn) => { - let committee_index = tester - .harness - .chain - .head_snapshot() - .beacon_state - .get_committee_count_at_slot(attn.data.slot) - .unwrap(); - // overwrite the existing committee bits before setting - attn.committee_bits = BitVector::default(); - attn.committee_bits.set(committee_index as usize, true).unwrap(); - } - } + |tester, a, _, _| { + let committee_index = tester + .harness + .chain + .head_snapshot() + .beacon_state + .get_committee_count_at_slot(a.data.slot) + .unwrap(); + + a.committee_index = committee_index; + }, + |_, err| { + assert!(matches!(err, AttnError::NoCommitteeForSlotAndIndex { .. })) }, - |_, err| assert!(matches!(err, AttnError::NoCommitteeForSlotAndIndex { .. })), ) /* * The following test ensures: @@ -946,8 +940,8 @@ async fn unaggregated_gossip_verification() { * attestation.data.slot, attestation.data.index) == subnet_id). 
*/ .inspect_unaggregate_err( - "attestation with invalid committee index", - |_, _, subnet_id| *subnet_id = SubnetId::new(42), + "attestation with invalid subnet_id", + |_, _, subnet_id, _| *subnet_id = SubnetId::new(42), |tester, err| { assert!(matches!( err, @@ -969,7 +963,7 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation from future slot", - |tester, a, _| a.data_mut().slot = tester.slot() + 1, + |tester, a, _, _| a.data.slot = tester.slot() + 1, |tester, err| { assert!(matches!( err, @@ -983,10 +977,10 @@ async fn unaggregated_gossip_verification() { ) .inspect_unaggregate_err( "attestation from past slot", - |tester, a, _| { + |tester, a, _, _| { let too_early_slot = tester.earliest_valid_attestation_slot() - 1; - a.data_mut().slot = too_early_slot; - a.data_mut().target.epoch = too_early_slot.epoch(E::slots_per_epoch()); + a.data.slot = too_early_slot; + a.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch()); }, |tester, err| { let valid_early_slot = tester.earliest_valid_attestation_slot(); @@ -1010,7 +1004,7 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation with invalid target epoch", - |_, a, _| a.data_mut().target.epoch += 1, + |_, a, _, _| a.data.target.epoch += 1, |_, err| { assert!(matches!( err, @@ -1018,104 +1012,6 @@ async fn unaggregated_gossip_verification() { )) }, ) - /* - * The following two tests ensure: - * - * The attestation is unaggregated -- that is, it has exactly one participating validator - * (len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1). 
- */ - .inspect_unaggregate_err( - "attestation without any aggregation bits set", - |tester, mut a, _| { - match &mut a { - Attestation::Base(ref mut att) => { - att.aggregation_bits - .set(tester.attester_committee_index, false) - .expect("should unset aggregation bit"); - assert_eq!( - att.aggregation_bits.num_set_bits(), - 0, - "test requires no set bits" - ); - } - Attestation::Electra(ref mut att) => { - att.aggregation_bits - .set(tester.attester_committee_index, false) - .expect("should unset aggregation bit"); - assert_eq!( - att.aggregation_bits.num_set_bits(), - 0, - "test requires no set bits" - ); - } - } - }, - |_, err| { - assert!(matches!( - err, - AttnError::NotExactlyOneAggregationBitSet(0) - )) - }, - ) - .inspect_unaggregate_err( - "attestation with two aggregation bits set", - |tester, mut a, _| { - match &mut a { - Attestation::Base(ref mut att) => { - att.aggregation_bits - .set(tester.attester_committee_index + 1, true) - .expect("should set second aggregation bit"); - } - Attestation::Electra(ref mut att) => { - att.aggregation_bits - .set(tester.attester_committee_index + 1, true) - .expect("should set second aggregation bit"); - } - } - }, - |_, err| { - assert!(matches!( - err, - AttnError::NotExactlyOneAggregationBitSet(2) - )) - }, - ) - /* - * The following test ensures: - * - * The number of aggregation bits matches the committee size -- i.e. - * `len(attestation.aggregation_bits) == len(get_beacon_committee(state, data.slot, - * data.index))`. 
- */ - .inspect_unaggregate_err( - "attestation with invalid bitfield", - |_, mut a, _| { - match &mut a { - Attestation::Base(ref mut att) => { - let bits = att.aggregation_bits.iter().collect::>(); - att.aggregation_bits = BitList::with_capacity(bits.len() + 1).unwrap(); - for (i, bit) in bits.into_iter().enumerate() { - att.aggregation_bits.set(i, bit).unwrap(); - } - } - Attestation::Electra(ref mut att) => { - let bits = att.aggregation_bits.iter().collect::>(); - att.aggregation_bits = BitList::with_capacity(bits.len() + 1).unwrap(); - for (i, bit) in bits.into_iter().enumerate() { - att.aggregation_bits.set(i, bit).unwrap(); - } - } - } - }, - |_, err| { - assert!(matches!( - err, - AttnError::Invalid(AttestationValidationError::BeaconStateError( - BeaconStateError::InvalidBitfield - )) - )) - }, - ) /* * The following test ensures that: * @@ -1123,8 +1019,8 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation with unknown head block", - |_, a, _| { - a.data_mut().beacon_block_root = Hash256::repeat_byte(42); + |_, a, _, _| { + a.data.beacon_block_root = Hash256::repeat_byte(42); }, |_, err| { assert!(matches!( @@ -1145,8 +1041,8 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation with invalid target root", - |_, a, _| { - a.data_mut().target.root = Hash256::repeat_byte(42); + |_, a, _, _| { + a.data.target.root = Hash256::repeat_byte(42); }, |_, err| { assert!(matches!( @@ -1162,10 +1058,10 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation with bad signature", - |tester, a, _| { + |tester, a, _, _| { let mut agg_sig = AggregateSignature::infinity(); agg_sig.add_assign(&tester.attester_sk.sign(Hash256::repeat_byte(42))); - *a.signature_mut() = agg_sig; + a.signature = agg_sig; }, |_, err| { assert!(matches!( @@ -1186,7 +1082,7 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation that has already been 
seen", - |_, _, _| {}, + |_, _, _, _| {}, |tester, err| { assert!(matches!( err, @@ -1194,7 +1090,7 @@ async fn unaggregated_gossip_verification() { validator_index, epoch, } - if validator_index == tester.attester_validator_index as u64 && epoch == tester.epoch() + if validator_index == tester.valid_attestation.attester_index && epoch == tester.epoch() )) }, ); @@ -1243,7 +1139,7 @@ async fn attestation_that_skips_epochs() { let state_root = state.update_tree_hash_cache().unwrap(); let (attestation, subnet_id) = harness - .get_unaggregated_attestations( + .get_single_attestations( &AttestationStrategy::AllValidators, &state, state_root, @@ -1256,7 +1152,7 @@ async fn attestation_that_skips_epochs() { .cloned() .expect("should have at least one attestation in committee"); - let block_root = attestation.data().beacon_block_root; + let block_root = attestation.data.beacon_block_root; let block_slot = harness .chain .store @@ -1267,7 +1163,7 @@ async fn attestation_that_skips_epochs() { .slot(); assert!( - attestation.data().slot - block_slot > E::slots_per_epoch() * 2, + attestation.data.slot - block_slot > E::slots_per_epoch() * 2, "the attestation must skip more than two epochs" ); @@ -1357,7 +1253,7 @@ async fn attestation_validator_receive_proposer_reward_and_withdrawals() { // Verifying the attestation triggers an inconsistent state replay. 
let remaining_attesters = (two_thirds..VALIDATOR_COUNT).collect(); let (attestation, subnet_id) = harness - .get_unaggregated_attestations( + .get_single_attestations( &AttestationStrategy::SomeValidators(remaining_attesters), &state, state_root, @@ -1426,7 +1322,7 @@ async fn attestation_to_finalized_block() { let state_root = state.update_tree_hash_cache().unwrap(); let (attestation, subnet_id) = harness - .get_unaggregated_attestations( + .get_single_attestations( &AttestationStrategy::AllValidators, &state, state_root, @@ -1438,7 +1334,7 @@ async fn attestation_to_finalized_block() { .first() .cloned() .expect("should have at least one attestation in committee"); - assert_eq!(attestation.data().beacon_block_root, earlier_block_root); + assert_eq!(attestation.data.beacon_block_root, earlier_block_root); // Attestation should be rejected for attesting to a pre-finalization block. let res = harness @@ -1481,8 +1377,23 @@ async fn verify_aggregate_for_gossip_doppelganger_detection() { "the test requires a new epoch to avoid already-seen errors" ); - let (valid_attestation, _attester_index, _attester_committee_index, _, _) = - get_valid_unaggregated_attestation(&harness.chain); + let (valid_attestation, _, _) = get_valid_unaggregated_attestation(&harness.chain); + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let committee = state + .get_beacon_committee( + valid_attestation.data.slot, + valid_attestation.committee_index, + ) + .unwrap(); + let fork_name = harness + .chain + .spec + .fork_name_at_slot::(valid_attestation.data.slot); + let valid_attestation = + single_attestation_to_attestation(&valid_attestation, committee.committee, fork_name) + .unwrap(); let (valid_aggregate, _, _) = get_valid_aggregated_attestation(&harness.chain, valid_attestation); @@ -1540,15 +1451,16 @@ async fn verify_attestation_for_gossip_doppelganger_detection() { "the test requires a new epoch to avoid already-seen errors" ); - let (valid_attestation, 
index, _attester_committee_index, _, subnet_id) = - get_valid_unaggregated_attestation(&harness.chain); + let (valid_attestation, _, subnet_id) = get_valid_unaggregated_attestation(&harness.chain); + + let index = valid_attestation.attester_index as usize; harness .chain .verify_unaggregated_attestation_for_gossip(&valid_attestation, Some(subnet_id)) .expect("should verify attestation"); - let epoch = valid_attestation.data().target.epoch; + let epoch = valid_attestation.data.target.epoch; assert!(harness.chain.validator_seen_at_epoch(index, epoch)); // Check the correct beacon cache is populated @@ -1612,7 +1524,7 @@ async fn attestation_verification_use_head_state_fork() { let attesters = (0..VALIDATOR_COUNT / 2).collect::>(); let capella_fork = spec.fork_for_name(ForkName::Capella).unwrap(); let committee_attestations = harness - .make_unaggregated_attestations_with_opts( + .make_single_attestations_with_opts( attesters.as_slice(), &state, state_root, @@ -1642,7 +1554,7 @@ async fn attestation_verification_use_head_state_fork() { let attesters = (VALIDATOR_COUNT / 2..VALIDATOR_COUNT).collect::>(); let bellatrix_fork = spec.fork_for_name(ForkName::Bellatrix).unwrap(); let committee_attestations = harness - .make_unaggregated_attestations_with_opts( + .make_single_attestations_with_opts( attesters.as_slice(), &state, state_root, diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 9225ffd9f4..9a6a789b42 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -30,8 +30,6 @@ type E = MainnetEthSpec; const VALIDATOR_COUNT: usize = 24; const CHAIN_SEGMENT_LENGTH: usize = 64 * 5; const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGTH - 1]; -// Default custody group count for tests -const CGC: usize = 8; /// A cached set of keys. 
static KEYPAIRS: LazyLock> = @@ -144,10 +142,9 @@ fn build_rpc_block( RpcBlock::new(None, block, Some(blobs.clone())).unwrap() } Some(DataSidecars::DataColumns(columns)) => { - RpcBlock::new_with_custody_columns(None, block, columns.clone(), columns.len(), spec) - .unwrap() + RpcBlock::new_with_custody_columns(None, block, columns.clone(), spec).unwrap() } - None => RpcBlock::new_without_blobs(None, block, 0), + None => RpcBlock::new_without_blobs(None, block), } } @@ -370,7 +367,6 @@ async fn chain_segment_non_linear_parent_roots() { blocks[3] = RpcBlock::new_without_blobs( None, Arc::new(SignedBeaconBlock::from_block(block, signature)), - harness.sampling_column_count, ); assert!( @@ -408,7 +404,6 @@ async fn chain_segment_non_linear_slots() { blocks[3] = RpcBlock::new_without_blobs( None, Arc::new(SignedBeaconBlock::from_block(block, signature)), - harness.sampling_column_count, ); assert!( @@ -436,7 +431,6 @@ async fn chain_segment_non_linear_slots() { blocks[3] = RpcBlock::new_without_blobs( None, Arc::new(SignedBeaconBlock::from_block(block, signature)), - harness.sampling_column_count, ); assert!( @@ -578,11 +572,7 @@ async fn invalid_signature_gossip_block() { .into_block_error() .expect("should import all blocks prior to the one being tested"); let signed_block = SignedBeaconBlock::from_block(block, junk_signature()); - let rpc_block = RpcBlock::new_without_blobs( - None, - Arc::new(signed_block), - harness.sampling_column_count, - ); + let rpc_block = RpcBlock::new_without_blobs(None, Arc::new(signed_block)); let process_res = harness .chain .process_block( @@ -1002,7 +992,6 @@ async fn block_gossip_verification() { let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; - let cgc = harness.chain.spec.custody_requirement as usize; harness .chain @@ -1016,7 +1005,7 @@ async fn block_gossip_verification() { { let gossip_verified = harness .chain - 
.verify_block_for_gossip(snapshot.beacon_block.clone(), get_cgc(&blobs_opt)) + .verify_block_for_gossip(snapshot.beacon_block.clone()) .await .expect("should obtain gossip verified block"); @@ -1058,7 +1047,7 @@ async fn block_gossip_verification() { *block.slot_mut() = expected_block_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)), cgc).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::FutureSlot { present_slot, block_slot, @@ -1092,7 +1081,7 @@ async fn block_gossip_verification() { *block.slot_mut() = expected_finalized_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)), cgc).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::WouldRevertFinalizedSlot { block_slot, finalized_slot, @@ -1122,10 +1111,10 @@ async fn block_gossip_verification() { unwrap_err( harness .chain - .verify_block_for_gossip( - Arc::new(SignedBeaconBlock::from_block(block, junk_signature())), - cgc - ) + .verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block( + block, + junk_signature() + )),) .await ), BlockError::InvalidSignature(InvalidSignature::ProposerSignature) @@ -1150,7 +1139,7 @@ async fn block_gossip_verification() { *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)), cgc).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::ParentUnknown {parent_root: p} if p == parent_root ), @@ -1176,7 +1165,7 @@ async fn block_gossip_verification() { *block.parent_root_mut() = parent_root; assert!( matches!( - 
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)), cgc).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::NotFinalizedDescendant { block_parent_root } if block_parent_root == parent_root ), @@ -1213,7 +1202,7 @@ async fn block_gossip_verification() { ); assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()), cgc).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::IncorrectBlockProposer { block, local_shuffling, @@ -1225,7 +1214,7 @@ async fn block_gossip_verification() { // Check to ensure that we registered this is a valid block from this proposer. assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()), cgc).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::DuplicateImportStatusUnknown(_), ), "should register any valid signature against the proposer, even if the block failed later verification" @@ -1233,11 +1222,7 @@ async fn block_gossip_verification() { let block = chain_segment[block_index].beacon_block.clone(); assert!( - harness - .chain - .verify_block_for_gossip(block, cgc) - .await - .is_ok(), + harness.chain.verify_block_for_gossip(block).await.is_ok(), "the valid block should be processed" ); @@ -1255,13 +1240,45 @@ async fn block_gossip_verification() { matches!( harness .chain - .verify_block_for_gossip(block.clone(), cgc) + .verify_block_for_gossip(block.clone()) .await .expect_err("should error when processing known block"), BlockError::DuplicateImportStatusUnknown(_) ), "the second proposal by this validator should be rejected" ); + + /* + * This test ensures that: + * + * We do not accept blocks with blob_kzg_commitments length larger than the max_blobs for that epoch. 
+ */ + let (mut block, signature) = chain_segment[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); + + let kzg_commitments_len = harness + .chain + .spec + .max_blobs_per_block(block.slot().epoch(E::slots_per_epoch())) + as usize; + + if let Ok(kzg_commitments) = block.body_mut().blob_kzg_commitments_mut() { + *kzg_commitments = vec![KzgCommitment::empty_for_testing(); kzg_commitments_len + 1].into(); + assert!( + matches!( + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), + BlockError::InvalidBlobCount { + max_blobs_at_epoch, + block, + } + if max_blobs_at_epoch == kzg_commitments_len && block == kzg_commitments_len + 1 + ), + "should not import a block with higher blob_kzg_commitment length than the max_blobs at epoch" + ); + } } async fn verify_and_process_gossip_data_sidecars( @@ -1331,17 +1348,8 @@ async fn verify_block_for_gossip_slashing_detection() { let state = harness.get_current_state(); let ((block1, blobs1), _) = harness.make_block(state.clone(), Slot::new(1)).await; let ((block2, _blobs2), _) = harness.make_block(state, Slot::new(1)).await; - let cgc = if block1.fork_name_unchecked().fulu_enabled() { - harness.get_sampling_column_count() - } else { - 0 - }; - let verified_block = harness - .chain - .verify_block_for_gossip(block1, cgc) - .await - .unwrap(); + let verified_block = harness.chain.verify_block_for_gossip(block1).await.unwrap(); if let Some((kzg_proofs, blobs)) = blobs1 { harness @@ -1364,7 +1372,7 @@ async fn verify_block_for_gossip_slashing_detection() { ) .await .unwrap(); - unwrap_err(harness.chain.verify_block_for_gossip(block2, CGC).await); + unwrap_err(harness.chain.verify_block_for_gossip(block2).await); // Slasher should have been handed the two conflicting blocks and crafted a slashing. 
slasher.process_queued(Epoch::new(0)).unwrap(); @@ -1388,11 +1396,7 @@ async fn verify_block_for_gossip_doppelganger_detection() { .attestations() .map(|att| att.clone_as_attestation()) .collect::>(); - let verified_block = harness - .chain - .verify_block_for_gossip(block, CGC) - .await - .unwrap(); + let verified_block = harness.chain.verify_block_for_gossip(block).await.unwrap(); harness .chain .process_block( @@ -1539,7 +1543,7 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(Arc::new(base_block.clone()), CGC) + .verify_block_for_gossip(Arc::new(base_block.clone())) .await .expect_err("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { @@ -1549,7 +1553,7 @@ async fn add_base_block_to_altair_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. - let base_rpc_block = RpcBlock::new_without_blobs(None, Arc::new(base_block.clone()), 0); + let base_rpc_block = RpcBlock::new_without_blobs(None, Arc::new(base_block.clone())); assert!(matches!( harness .chain @@ -1573,7 +1577,7 @@ async fn add_base_block_to_altair_chain() { harness .chain .process_chain_segment( - vec![RpcBlock::new_without_blobs(None, Arc::new(base_block), 0)], + vec![RpcBlock::new_without_blobs(None, Arc::new(base_block))], NotifyExecutionLayer::Yes, ) .await, @@ -1676,7 +1680,7 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(Arc::new(altair_block.clone()), CGC) + .verify_block_for_gossip(Arc::new(altair_block.clone())) .await .expect_err("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { @@ -1686,7 +1690,7 @@ async fn add_altair_block_to_base_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. 
- let altair_rpc_block = RpcBlock::new_without_blobs(None, Arc::new(altair_block.clone()), 0); + let altair_rpc_block = RpcBlock::new_without_blobs(None, Arc::new(altair_block.clone())); assert!(matches!( harness .chain @@ -1710,7 +1714,7 @@ async fn add_altair_block_to_base_chain() { harness .chain .process_chain_segment( - vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block), 0)], + vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block))], NotifyExecutionLayer::Yes ) .await, @@ -1771,11 +1775,7 @@ async fn import_duplicate_block_unrealized_justification() { // Create two verified variants of the block, representing the same block being processed in // parallel. let notify_execution_layer = NotifyExecutionLayer::Yes; - let rpc_block = RpcBlock::new_without_blobs( - Some(block_root), - block.clone(), - harness.sampling_column_count, - ); + let rpc_block = RpcBlock::new_without_blobs(Some(block_root), block.clone()); let verified_block1 = rpc_block .clone() .into_execution_pending_block(block_root, chain, notify_execution_layer) @@ -1846,14 +1846,3 @@ async fn import_execution_pending_block( } } } - -fn get_cgc(blobs_opt: &Option>) -> usize { - if let Some(data_sidecars) = blobs_opt.as_ref() { - match data_sidecars { - DataSidecars::Blobs(_) => 0, - DataSidecars::DataColumns(d) => d.len(), - } - } else { - 0 - } -} diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index c9bd55e062..5d0f22e252 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -1,11 +1,15 @@ use beacon_chain::blob_verification::GossipVerifiedBlob; -use beacon_chain::test_utils::BeaconChainHarness; -use eth2::types::{EventKind, SseBlobSidecar}; +use beacon_chain::data_column_verification::GossipVerifiedDataColumn; +use beacon_chain::test_utils::{BeaconChainHarness, TEST_DATA_COLUMN_SIDECARS_SSZ}; +use eth2::types::{EventKind, SseBlobSidecar, SseDataColumnSidecar}; use rand::rngs::StdRng; 
use rand::SeedableRng; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec}; +use types::test_utils::TestRandom; +use types::{ + BlobSidecar, DataColumnSidecar, EthSpec, ForkName, MinimalEthSpec, RuntimeVariableList, +}; type E = MinimalEthSpec; @@ -43,6 +47,42 @@ async fn blob_sidecar_event_on_process_gossip_blob() { assert_eq!(sidecar_event, EventKind::BlobSidecar(expected_sse_blobs)); } +/// Verifies that a data column event is emitted when a gossip verified data column is received via gossip or the publish block API. +#[tokio::test] +async fn data_column_sidecar_event_on_process_gossip_data_column() { + let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // subscribe to blob sidecar events + let event_handler = harness.chain.event_handler.as_ref().unwrap(); + let mut data_column_event_receiver = event_handler.subscribe_data_column_sidecar(); + + // build and process a gossip verified data column + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let sidecar = Arc::new(DataColumnSidecar::random_for_test(&mut rng)); + let gossip_verified_data_column = GossipVerifiedDataColumn::__new_for_testing(sidecar); + let expected_sse_data_column = SseDataColumnSidecar::from_data_column_sidecar( + gossip_verified_data_column.as_data_column(), + ); + + let _ = harness + .chain + .process_gossip_data_columns(vec![gossip_verified_data_column], || Ok(())) + .await + .unwrap(); + + let sidecar_event = data_column_event_receiver.try_recv().unwrap(); + assert_eq!( + sidecar_event, + EventKind::DataColumnSidecar(expected_sse_data_column) + ); +} + /// Verifies that a blob event is emitted when blobs are received via RPC. 
#[tokio::test] async fn blob_sidecar_event_on_process_rpc_blobs() { @@ -95,3 +135,41 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { } assert_eq!(sse_blobs, expected_sse_blobs); } + +#[tokio::test] +async fn data_column_sidecar_event_on_process_rpc_columns() { + let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.clone()) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // subscribe to blob sidecar events + let event_handler = harness.chain.event_handler.as_ref().unwrap(); + let mut data_column_event_receiver = event_handler.subscribe_data_column_sidecar(); + + // load the precomputed column sidecar to avoid computing them for every block in the tests. + let mut sidecar = RuntimeVariableList::>::from_ssz_bytes( + TEST_DATA_COLUMN_SIDECARS_SSZ, + spec.number_of_columns as usize, + ) + .unwrap()[0] + .clone(); + let parent_root = harness.chain.head().head_block_root(); + sidecar.signed_block_header.message.parent_root = parent_root; + let expected_sse_data_column = SseDataColumnSidecar::from_data_column_sidecar(&sidecar); + + let _ = harness + .chain + .process_rpc_custody_columns(vec![Arc::new(sidecar)]) + .await + .unwrap(); + + let sidecar_event = data_column_event_receiver.try_recv().unwrap(); + assert_eq!( + sidecar_event, + EventKind::DataColumnSidecar(expected_sse_data_column) + ); +} diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index 942ce81684..f0978c5f05 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -7,6 +7,7 @@ mod events; mod op_verification; mod payload_invalidation; mod rewards; +mod schema_stability; mod store_tests; mod sync_committee_verification; mod tests; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 
6b9ff9d6ed..05fae7aa70 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -22,7 +22,6 @@ use task_executor::ShutdownReason; use types::*; const VALIDATOR_COUNT: usize = 32; -const CGC: usize = 8; type E = MainnetEthSpec; @@ -686,8 +685,7 @@ async fn invalidates_all_descendants() { assert_eq!(fork_parent_state.slot(), fork_parent_slot); let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; - let fork_rpc_block = - RpcBlock::new_without_blobs(None, fork_block.clone(), rig.harness.sampling_column_count); + let fork_rpc_block = RpcBlock::new_without_blobs(None, fork_block.clone()); let fork_block_root = rig .harness .chain @@ -789,8 +787,7 @@ async fn switches_heads() { let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); - let fork_rpc_block = - RpcBlock::new_without_blobs(None, fork_block.clone(), rig.harness.sampling_column_count); + let fork_rpc_block = RpcBlock::new_without_blobs(None, fork_block.clone()); let fork_block_root = rig .harness .chain @@ -1054,14 +1051,13 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for gossip. assert!(matches!( - rig.harness.chain.clone().verify_block_for_gossip(block.clone(), CGC).await, + rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); // Ensure the block built atop an invalid payload is invalid for import. 
- let rpc_block = - RpcBlock::new_without_blobs(None, block.clone(), rig.harness.sampling_column_count); + let rpc_block = RpcBlock::new_without_blobs(None, block.clone()); assert!(matches!( rig.harness.chain.process_block(rpc_block.block_root(), rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1385,8 +1381,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { } = InvalidHeadSetup::new().await; // Import the fork block, it should become the head. - let fork_rpc_block = - RpcBlock::new_without_blobs(None, fork_block.clone(), rig.harness.sampling_column_count); + let fork_rpc_block = RpcBlock::new_without_blobs(None, fork_block.clone()); rig.harness .chain .process_block( diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs new file mode 100644 index 0000000000..00d75a554d --- /dev/null +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -0,0 +1,151 @@ +use beacon_chain::{ + persisted_beacon_chain::PersistedBeaconChain, + persisted_custody::PersistedCustody, + test_utils::{test_spec, BeaconChainHarness, DiskHarnessType}, + ChainConfig, +}; +use logging::create_test_tracing_subscriber; +use operation_pool::PersistedOperationPool; +use ssz::Encode; +use std::sync::{Arc, LazyLock}; +use store::{ + database::interface::BeaconNodeBackend, hot_cold_store::Split, metadata::DataColumnInfo, + DBColumn, HotColdDB, StoreConfig, StoreItem, +}; +use strum::IntoEnumIterator; +use tempfile::{tempdir, TempDir}; +use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec}; + +type E = MainnetEthSpec; +type Store = Arc, BeaconNodeBackend>>; +type TestHarness = BeaconChainHarness>; + +const VALIDATOR_COUNT: usize = 32; + +/// A cached set of keys. 
+static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); + +fn get_store(db_path: &TempDir, config: StoreConfig, spec: Arc) -> Store { + create_test_tracing_subscriber(); + let hot_path = db_path.path().join("chain_db"); + let cold_path = db_path.path().join("freezer_db"); + let blobs_path = db_path.path().join("blobs_db"); + + HotColdDB::open( + &hot_path, + &cold_path, + &blobs_path, + |_, _, _| Ok(()), + config, + spec, + ) + .expect("disk store should initialize") +} + +/// This test checks the database schema stability against previous versions of Lighthouse's code. +/// +/// If you are changing something about how Lighthouse stores data on disk, you almost certainly +/// need to implement a database schema change. This is true even if the data being stored only +/// applies to an upcoming fork that isn't live on mainnet. We never want to be in the situation +/// where commit A writes data in some format, and then a later commit B changes that format +/// without a schema change. This is liable to break any nodes that update from A to B, even if +/// these nodes are just testnet nodes. +/// +/// This test implements partial, imperfect checks on the DB schema which are designed to quickly +/// catch common changes. +/// +/// This test uses hardcoded values, rather than trying to access previous versions of Lighthouse's +/// code. If you've successfully implemented a schema change and you're sure that the new values are +/// correct, you can update the hardcoded values here. 
+#[tokio::test] +async fn schema_stability() { + let spec = Arc::new(test_spec::()); + + let datadir = tempdir().unwrap(); + let store_config = StoreConfig::default(); + let store = get_store(&datadir, store_config, spec.clone()); + + let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }; + + let harness = TestHarness::builder(MainnetEthSpec) + .spec(spec) + .keypairs(KEYPAIRS.to_vec()) + .fresh_disk_store(store.clone()) + .mock_execution_layer() + .chain_config(chain_config) + .build(); + harness.advance_slot(); + + let chain = &harness.chain; + + chain.persist_op_pool().unwrap(); + chain.persist_custody_context().unwrap(); + + check_db_columns(); + check_metadata_sizes(&store); + check_op_pool(&store); + check_custody_context(&store); + check_persisted_chain(&store); + + // Not covered here: + // - Fork choice (not tested) + // - DBColumn::DhtEnrs (tested in network crate) +} + +/// Check that the set of database columns is unchanged. +fn check_db_columns() { + let current_columns: Vec<&'static str> = DBColumn::iter().map(|c| c.as_str()).collect(); + let expected_columns = vec![ + "bma", "blk", "blb", "bdc", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs", "bst", + "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr", "brm", + "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", + ]; + assert_eq!(expected_columns, current_columns); +} + +/// Check the SSZ sizes of known on-disk metadata. +/// +/// New types can be added here as the schema evolves. 
+fn check_metadata_sizes(store: &Store) { + assert_eq!(Split::default().ssz_bytes_len(), 40); + assert_eq!(store.get_anchor_info().ssz_bytes_len(), 64); + assert_eq!( + store.get_blob_info().ssz_bytes_len(), + if store.get_chain_spec().deneb_fork_epoch.is_some() { + 14 + } else { + 6 + } + ); + assert_eq!(DataColumnInfo::default().ssz_bytes_len(), 5); +} + +fn check_op_pool(store: &Store) { + let op_pool = store + .get_item::>(&Hash256::ZERO) + .unwrap() + .unwrap(); + assert!(matches!(op_pool, PersistedOperationPool::V20(_))); + assert_eq!(op_pool.ssz_bytes_len(), 28); + assert_eq!(op_pool.as_store_bytes().len(), 28); +} + +fn check_custody_context(store: &Store) { + let custody_context = store + .get_item::(&Hash256::ZERO) + .unwrap() + .unwrap(); + assert_eq!(custody_context.as_store_bytes().len(), 9); +} + +fn check_persisted_chain(store: &Store) { + let chain = store + .get_item::(&Hash256::ZERO) + .unwrap() + .unwrap(); + assert_eq!(chain.as_store_bytes().len(), 32); +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 3343dc101b..1be2879e1a 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -24,15 +24,18 @@ use state_processing::{state_advance::complete_state_advance, BlockReplayer}; use std::collections::HashMap; use std::collections::HashSet; use std::convert::TryInto; +use std::str::FromStr; use std::sync::{Arc, LazyLock}; use std::time::Duration; use store::database::interface::BeaconNodeBackend; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION, STATE_UPPER_LIMIT_NO_RETAIN}; use store::{ + hdiff::HierarchyConfig, iter::{BlockRootsIterator, StateRootsIterator}, BlobInfo, DBColumn, HotColdDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; +use tracing::info; use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; @@ -121,15 +124,16 @@ fn get_harness_generic( harness } -fn count_states_descendant_of_block( +fn 
get_states_descendant_of_block( store: &HotColdDB, BeaconNodeBackend>, block_root: Hash256, -) -> usize { +) -> Vec<(Hash256, Slot)> { let summaries = store.load_hot_state_summaries().unwrap(); summaries .iter() .filter(|(_, s)| s.latest_block_root == block_root) - .count() + .map(|(state_root, summary)| (*state_root, summary.slot)) + .collect() } #[tokio::test] @@ -491,7 +495,7 @@ async fn epoch_boundary_state_attestation_processing() { .await; let head = harness.chain.head_snapshot(); - late_attestations.extend(harness.get_unaggregated_attestations( + late_attestations.extend(harness.get_single_attestations( &AttestationStrategy::SomeValidators(late_validators.clone()), &head.beacon_state, head.beacon_state_root(), @@ -511,20 +515,23 @@ async fn epoch_boundary_state_attestation_processing() { for (attestation, subnet_id) in late_attestations.into_iter().flatten() { // load_epoch_boundary_state is idempotent! - let block_root = attestation.data().beacon_block_root; + let block_root = attestation.data.beacon_block_root; let block = store .get_blinded_block(&block_root) .unwrap() .expect("block exists"); + // Use get_state as the state may be finalized by this point let mut epoch_boundary_state = store - .load_epoch_boundary_state(&block.state_root()) + .get_state(&block.state_root(), None, CACHE_STATE_IN_TESTS) .expect("no error") - .expect("epoch boundary state exists"); + .unwrap_or_else(|| { + panic!("epoch boundary state should exist {:?}", block.state_root()) + }); let ebs_state_root = epoch_boundary_state.update_tree_hash_cache().unwrap(); let mut ebs_of_ebs = store - .load_epoch_boundary_state(&ebs_state_root) + .get_state(&ebs_state_root, None, CACHE_STATE_IN_TESTS) .expect("no error") - .expect("ebs of ebs exists"); + .unwrap_or_else(|| panic!("ebs of ebs should exist {ebs_state_root:?}")); ebs_of_ebs.apply_pending_mutations().unwrap(); assert_eq!(epoch_boundary_state, ebs_of_ebs); @@ -536,7 +543,7 @@ async fn epoch_boundary_state_attestation_processing() 
{ .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); - let expected_attestation_slot = attestation.data().slot; + let expected_attestation_slot = attestation.data.slot; // Extra -1 to handle gossip clock disparity. let expected_earliest_permissible_slot = current_slot - E::slots_per_epoch() - 1; @@ -2171,7 +2178,8 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { let slots_per_epoch = E::slots_per_epoch(); - let genesis_state = harness.get_current_state(); + let mut genesis_state = harness.get_current_state(); + let genesis_state_root = genesis_state.update_tree_hash_cache().unwrap(); let block_slot = Slot::new(2 * slots_per_epoch); let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; @@ -2198,7 +2206,7 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { // The bad block parent root is the genesis block root. There's `block_slot - 1` temporary // states to remove + the genesis state = block_slot. assert_eq!( - count_states_descendant_of_block(&store, bad_block_parent_root), + get_states_descendant_of_block(&store, bad_block_parent_root).len(), block_slot.as_usize(), ); @@ -2216,11 +2224,12 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { // Check that the finalization migration ran. assert_ne!(store.get_split_slot(), 0); - // Check that temporary states have been pruned. The genesis block is not a descendant of the - // latest finalized checkpoint, so all its states have been pruned from the hot DB, = 0. + // Check that temporary states have been pruned. 
assert_eq!( - count_states_descendant_of_block(&store, bad_block_parent_root), - 0 + get_states_descendant_of_block(&store, bad_block_parent_root), + // The genesis state is kept to support the HDiff grid + vec![(genesis_state_root, Slot::new(0))], + "get_states_descendant_of_block({bad_block_parent_root:?})" ); } @@ -2322,6 +2331,8 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .get_state(&wss_state_root, Some(checkpoint_slot), CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); + let wss_state_slot = wss_state.slot(); + let wss_block_slot = wss_block.slot(); // Add more blocks that advance finalization further. harness.advance_slot(); @@ -2368,8 +2379,6 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { ) .unwrap() .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") .slot_clock(slot_clock) .shutdown_sender(shutdown_tx) .chain_config(ChainConfig::default()) @@ -2414,12 +2423,14 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .unwrap(); let slot = full_block.slot(); + let full_block_root = full_block.canonical_root(); let state_root = full_block.state_root(); + info!(block_root = ?full_block_root, ?state_root, %slot, "Importing block from chain dump"); beacon_chain.slot_clock.set_slot(slot.as_u64()); beacon_chain .process_block( - full_block.canonical_root(), + full_block_root, harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, @@ -2506,8 +2517,19 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { HistoricalBlockError::InvalidSignature )); + let available_blocks_slots = available_blocks + .iter() + .map(|block| (block.block().slot(), block.block().canonical_root())) + .collect::>(); + info!( + ?available_blocks_slots, + "wss_block_slot" = wss_block.slot().as_usize(), + "Importing historical block batch" + ); + 
// Importing the batch with valid signatures should succeed. let available_blocks_dup = available_blocks.iter().map(clone_block).collect::>(); + assert_eq!(beacon_chain.store.get_oldest_block_slot(), wss_block.slot()); beacon_chain .import_historical_block_batch(available_blocks_dup) .unwrap(); @@ -2518,6 +2540,17 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .import_historical_block_batch(available_blocks) .unwrap(); + // Sanity check for non-aligned WSS starts, to make sure the WSS block is persisted properly + if wss_block_slot != wss_state_slot { + let new_node_block_root_at_wss_block = beacon_chain + .store + .get_cold_block_root(wss_block_slot) + .unwrap() + .unwrap(); + info!(?new_node_block_root_at_wss_block, %wss_block_slot); + assert_eq!(new_node_block_root_at_wss_block, wss_block.canonical_root()); + } + // The forwards iterator should now match the original chain let forwards = beacon_chain .forwards_iter_block_roots(Slot::new(0)) @@ -2571,11 +2604,25 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { } // Anchor slot is still set to the slot of the checkpoint block. - assert_eq!(store.get_anchor_info().anchor_slot, wss_block.slot()); + // Note: since hot tree states the anchor slot is set to the aligned ws state slot + // https://github.com/sigp/lighthouse/pull/6750 + let wss_aligned_slot = if checkpoint_slot % E::slots_per_epoch() == 0 { + checkpoint_slot + } else { + (checkpoint_slot.epoch(E::slots_per_epoch()) + Epoch::new(1)) + .start_slot(E::slots_per_epoch()) + }; + assert_eq!(store.get_anchor_info().anchor_slot, wss_aligned_slot); + assert_eq!( + store.get_anchor_info().state_upper_limit, + Slot::new(u64::MAX) + ); + info!(anchor = ?store.get_anchor_info(), "anchor pre"); // Reconstruct states. 
store.clone().reconstruct_historic_states(None).unwrap(); - assert_eq!(store.get_anchor_info().anchor_slot, 0); + assert_eq!(store.get_anchor_info().anchor_slot, wss_aligned_slot); + assert_eq!(store.get_anchor_info().state_upper_limit, Slot::new(0)); } /// Test that blocks and attestations that refer to states around an unaligned split state are @@ -2644,11 +2691,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert_eq!(split.block_root, valid_fork_block.parent_root()); assert_ne!(split.state_root, unadvanced_split_state_root); - let invalid_fork_rpc_block = RpcBlock::new_without_blobs( - None, - invalid_fork_block.clone(), - harness.sampling_column_count, - ); + let invalid_fork_rpc_block = RpcBlock::new_without_blobs(None, invalid_fork_block.clone()); // Applying the invalid block should fail. let err = harness .chain @@ -2664,11 +2707,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. })); // Applying the valid block should succeed, but it should not become head. 
- let valid_fork_rpc_block = RpcBlock::new_without_blobs( - None, - valid_fork_block.clone(), - harness.sampling_column_count, - ); + let valid_fork_rpc_block = RpcBlock::new_without_blobs(None, valid_fork_block.clone()); harness .chain .process_block( @@ -2712,7 +2751,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { slot, ); harness.advance_slot(); - harness.process_attestations(attestations); + harness.process_attestations(attestations, &advanced_split_state); } } @@ -2763,10 +2802,6 @@ async fn finalizes_after_resuming_from_db() { .chain .persist_op_pool() .expect("should persist the op pool"); - harness - .chain - .persist_eth1_cache() - .expect("should persist the eth1 cache"); let original_chain = harness.chain; @@ -2874,8 +2909,8 @@ async fn revert_minority_fork_on_resume() { ); harness1.set_current_slot(slot); harness2.set_current_slot(slot); - harness1.process_attestations(attestations.clone()); - harness2.process_attestations(attestations); + harness1.process_attestations(attestations.clone(), &state); + harness2.process_attestations(attestations, &state); let ((block, blobs), new_state) = harness1.make_block(state, slot).await; @@ -2915,7 +2950,7 @@ async fn revert_minority_fork_on_resume() { slot, ); harness2.set_current_slot(slot); - harness2.process_attestations(attestations); + harness2.process_attestations(attestations, &state2); // Minority chain block (no attesters). let ((block1, blobs1), new_state1) = harness1.make_block(state1, slot).await; @@ -3007,12 +3042,27 @@ async fn revert_minority_fork_on_resume() { // version is correct. This is the easiest schema test to write without historic versions of // Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually // as old downgrades are deprecated. 
-#[tokio::test] -async fn schema_downgrade_to_min_version() { +async fn schema_downgrade_to_min_version( + store_config: StoreConfig, + reconstruct_historic_states: bool, +) { let num_blocks_produced = E::slots_per_epoch() * 4; let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let spec = test_spec::(); + + let chain_config = ChainConfig { + reconstruct_historic_states, + ..ChainConfig::default() + }; + let import_all_data_columns = false; + + let store = get_store_generic(&db_path, store_config.clone(), spec.clone()); + let harness = get_harness_generic( + store.clone(), + LOW_VALIDATOR_COUNT, + chain_config.clone(), + import_all_data_columns, + ); harness .extend_chain( @@ -3023,7 +3073,6 @@ async fn schema_downgrade_to_min_version() { .await; let min_version = SchemaVersion(22); - let genesis_state_root = Some(harness.chain.genesis_state_root); // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); @@ -3033,49 +3082,106 @@ async fn schema_downgrade_to_min_version() { drop(harness); // Re-open the store. - let store = get_store(&db_path); + let store = get_store_generic(&db_path, store_config, spec); // Downgrade. - migrate_schema::>( - store.clone(), - genesis_state_root, - CURRENT_SCHEMA_VERSION, - min_version, - ) - .expect("schema downgrade to minimum version should work"); + migrate_schema::>(store.clone(), CURRENT_SCHEMA_VERSION, min_version) + .expect("schema downgrade to minimum version should work"); // Upgrade back. - migrate_schema::>( - store.clone(), - genesis_state_root, - min_version, - CURRENT_SCHEMA_VERSION, - ) - .expect("schema upgrade from minimum version should work"); + migrate_schema::>(store.clone(), min_version, CURRENT_SCHEMA_VERSION) + .expect("schema upgrade from minimum version should work"); // Recreate the harness. 
let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() + .chain_config(chain_config) .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) .testing_slot_clock(slot_clock) .resumed_disk_store(store.clone()) .mock_execution_layer() .build(); + // Check chain dump for appropriate range depending on whether this is an archive node. + let chain_dump_start_slot = if reconstruct_historic_states { + Slot::new(0) + } else { + store.get_split_slot() + }; + check_finalization(&harness, num_blocks_produced); check_split_slot(&harness, store.clone()); - check_chain_dump(&harness, num_blocks_produced + 1); - check_iterators(&harness); + check_chain_dump_from_slot( + &harness, + chain_dump_start_slot, + num_blocks_produced + 1 - chain_dump_start_slot.as_u64(), + ); + check_iterators_from_slot(&harness, chain_dump_start_slot); // Check that downgrading beyond the minimum version fails (bound is *tight*). let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap()); - migrate_schema::>( - store.clone(), - genesis_state_root, - CURRENT_SCHEMA_VERSION, - min_version_sub_1, + migrate_schema::>(store.clone(), CURRENT_SCHEMA_VERSION, min_version_sub_1) + .expect_err("should not downgrade below minimum version"); +} + +// Schema upgrade/downgrade on an archive node where the optimised migration does apply due +// to the split state being aligned to a diff layer. +#[tokio::test] +async fn schema_downgrade_to_min_version_archive_node_grid_aligned() { + // Need to use 3 as the hierarchy exponent to get diffs on every epoch boundary with minimal + // spec. 
+ schema_downgrade_to_min_version( + StoreConfig { + hierarchy_config: HierarchyConfig::from_str("3,4,5").unwrap(), + prune_payloads: false, + ..StoreConfig::default() + }, + true, ) - .expect_err("should not downgrade below minimum version"); + .await +} + +// Schema upgrade/downgrade on an archive node where the optimised migration DOES NOT apply +// due to the split state NOT being aligned to a diff layer. +#[tokio::test] +async fn schema_downgrade_to_min_version_archive_node_grid_unaligned() { + schema_downgrade_to_min_version( + StoreConfig { + hierarchy_config: HierarchyConfig::from_str("7").unwrap(), + prune_payloads: false, + ..StoreConfig::default() + }, + true, + ) + .await +} + +// Schema upgrade/downgrade on a full node with a fairly normal per-epoch diff config. +#[tokio::test] +async fn schema_downgrade_to_min_version_full_node_per_epoch_diffs() { + schema_downgrade_to_min_version( + StoreConfig { + hierarchy_config: HierarchyConfig::from_str("3,4,5").unwrap(), + prune_payloads: false, + ..StoreConfig::default() + }, + false, + ) + .await +} + +// Schema upgrade/downgrade on a full node with dense per-slot diffs. +#[tokio::test] +async fn schema_downgrade_to_min_version_full_node_dense_diffs() { + schema_downgrade_to_min_version( + StoreConfig { + hierarchy_config: HierarchyConfig::from_str("0,3,4,5").unwrap(), + prune_payloads: false, + ..StoreConfig::default() + }, + true, + ) + .await } /// Check that blob pruning prunes blobs older than the data availability boundary. @@ -3463,6 +3569,163 @@ async fn prune_historic_states() { check_split_slot(&harness, store); } +// Test the function `get_ancestor_state_root` for slots prior to the split where we only have +// sparse summaries stored. 
+#[tokio::test] +async fn ancestor_state_root_prior_to_split() { + let db_path = tempdir().unwrap(); + + let spec = test_spec::(); + + let store_config = StoreConfig { + prune_payloads: false, + hierarchy_config: HierarchyConfig::from_str("5,7,8").unwrap(), + ..StoreConfig::default() + }; + let chain_config = ChainConfig { + reconstruct_historic_states: false, + ..ChainConfig::default() + }; + let import_all_data_columns = false; + + let store = get_store_generic(&db_path, store_config, spec); + let harness = get_harness_generic( + store.clone(), + LOW_VALIDATOR_COUNT, + chain_config, + import_all_data_columns, + ); + + // Produce blocks until we have passed through two full snapshot periods. This period length is + // determined by the hierarchy config set above. + let num_blocks = 2 * store + .hierarchy + .next_snapshot_slot(Slot::new(1)) + .unwrap() + .as_u64(); + + for num_blocks_so_far in 0..num_blocks { + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + harness.advance_slot(); + + // Check that `get_ancestor_state_root` can look up the grid-aligned ancestors of every hot + // state, even at ancestor slots prior to the split. + let head_state = harness.get_current_state(); + assert_eq!(head_state.slot().as_u64(), num_blocks_so_far + 1); + + let split_slot = store.get_split_slot(); + let anchor_slot = store.get_anchor_info().anchor_slot; + + for state_slot in (split_slot.as_u64()..=num_blocks_so_far).map(Slot::new) { + for ancestor_slot in store + .hierarchy + .closest_layer_points(state_slot, anchor_slot) + { + // The function currently doesn't consider a state an ancestor of itself, so this + // does not work. 
+ if ancestor_slot == state_slot { + continue; + } + let ancestor_state_root = store::hot_cold_store::get_ancestor_state_root( + &store, + &head_state, + ancestor_slot, + ) + .unwrap_or_else(|e| { + panic!( + "get_ancestor_state_root failed for state_slot={state_slot}, \ + ancestor_slot={ancestor_slot}, head_slot={}. error: {e:?}", + head_state.slot() + ) + }); + + // Check state root correctness. + assert_eq!( + store + .load_hot_state_summary(&ancestor_state_root) + .unwrap() + .unwrap_or_else(|| panic!( + "no summary found for {ancestor_state_root:?} (slot {ancestor_slot})" + )) + .slot, + ancestor_slot, + ) + } + } + } + + // This test only makes sense if the split is non-zero by the end. + assert_ne!(store.get_split_slot(), 0); +} + +// Test that the chain operates correctly when the split state is stored as a ReplayFrom. +#[tokio::test] +async fn replay_from_split_state() { + let db_path = tempdir().unwrap(); + + let spec = test_spec::(); + + let store_config = StoreConfig { + prune_payloads: false, + hierarchy_config: HierarchyConfig::from_str("5").unwrap(), + ..StoreConfig::default() + }; + let chain_config = ChainConfig { + reconstruct_historic_states: false, + ..ChainConfig::default() + }; + let import_all_data_columns = false; + + let store = get_store_generic(&db_path, store_config.clone(), spec.clone()); + let harness = get_harness_generic( + store.clone(), + LOW_VALIDATOR_COUNT, + chain_config, + import_all_data_columns, + ); + + // Produce blocks until we finalize epoch 3 which will not be stored as a snapshot. 
+ let num_blocks = 5 * E::slots_per_epoch() as usize; + + harness + .extend_chain( + num_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let split = store.get_split_info(); + let anchor_slot = store.get_anchor_info().anchor_slot; + assert_eq!(split.slot, 3 * E::slots_per_epoch()); + assert_eq!(anchor_slot, 0); + assert!(store + .hierarchy + .storage_strategy(split.slot, anchor_slot) + .unwrap() + .is_replay_from()); + + // Close the database and reopen it. + drop(store); + drop(harness); + + let store = get_store_generic(&db_path, store_config, spec); + + // Check that the split state is still accessible. + assert_eq!(store.get_split_slot(), split.slot); + let state = store + .get_hot_state(&split.state_root, false) + .unwrap() + .expect("split state should be present"); + assert_eq!(state.slot(), split.slot); +} + /// Checks that two chains are the same, for the purpose of these tests. /// /// Several fields that are hard/impossible to check are ignored (e.g., the store). @@ -3556,7 +3819,11 @@ fn check_split_slot( /// Check that all the states in a chain dump have the correct tree hash. 
fn check_chain_dump(harness: &TestHarness, expected_len: u64) { - let mut chain_dump = harness.chain.chain_dump().unwrap(); + check_chain_dump_from_slot(harness, Slot::new(0), expected_len) +} + +fn check_chain_dump_from_slot(harness: &TestHarness, from_slot: Slot, expected_len: u64) { + let mut chain_dump = harness.chain.chain_dump_from_slot(from_slot).unwrap(); assert_eq!(chain_dump.len() as u64, expected_len); @@ -3604,7 +3871,7 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) { let mut forward_block_roots = harness .chain - .forwards_iter_block_roots(Slot::new(0)) + .forwards_iter_block_roots(from_slot) .expect("should get iter") .map(Result::unwrap) .collect::>(); @@ -3625,10 +3892,14 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) { /// Check that every state from the canonical chain is in the database, and that the /// reverse state and block root iterators reach genesis. fn check_iterators(harness: &TestHarness) { + check_iterators_from_slot(harness, Slot::new(0)) +} + +fn check_iterators_from_slot(harness: &TestHarness, slot: Slot) { let mut max_slot = None; for (state_root, slot) in harness .chain - .forwards_iter_state_roots(Slot::new(0)) + .forwards_iter_state_roots(slot) .expect("should get iter") .map(Result::unwrap) { @@ -3650,7 +3921,7 @@ fn check_iterators(harness: &TestHarness) { assert_eq!( harness .chain - .forwards_iter_block_roots(Slot::new(0)) + .forwards_iter_block_roots(slot) .expect("should get iter") .last() .map(Result::unwrap) diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c801361fd5..55ef3dc279 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -9,6 +9,7 @@ use beacon_chain::{ BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; use operation_pool::PersistedOperationPool; +use state_processing::EpochProcessingError; use state_processing::{per_slot_processing, 
per_slot_processing::Error as SlotProcessingError}; use std::sync::LazyLock; use types::{ @@ -67,11 +68,23 @@ fn massive_skips() { }; assert!(state.slot() > 1, "the state should skip at least one slot"); - assert_eq!( - error, - SlotProcessingError::BeaconStateError(BeaconStateError::InsufficientValidators), - "should return error indicating that validators have been slashed out" - ) + + if state.fork_name_unchecked().fulu_enabled() { + // post-fulu this is done in per_epoch_processing + assert_eq!( + error, + SlotProcessingError::EpochProcessingError(EpochProcessingError::BeaconStateError( + BeaconStateError::InsufficientValidators + )), + "should return error indicating that validators have been slashed out" + ) + } else { + assert_eq!( + error, + SlotProcessingError::BeaconStateError(BeaconStateError::InsufficientValidators), + "should return error indicating that validators have been slashed out" + ) + } } #[tokio::test] @@ -567,7 +580,7 @@ async fn attestations_with_increasing_slots() { let head = harness.chain.head_snapshot(); let head_state_root = head.beacon_state_root(); - attestations.extend(harness.get_unaggregated_attestations( + attestations.extend(harness.get_single_attestations( &AttestationStrategy::AllValidators, &head.beacon_state, head_state_root, @@ -584,7 +597,7 @@ async fn attestations_with_increasing_slots() { .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); - let expected_attestation_slot = attestation.data().slot; + let expected_attestation_slot = attestation.data.slot; let expected_earliest_permissible_slot = current_slot - MinimalEthSpec::slots_per_epoch() - 1; diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index bca37b4e6d..5b861d1a4a 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -81,7 +81,7 @@ 
async fn missed_blocks_across_epochs() { epoch, decision_root, state - .get_beacon_proposer_indices(&harness.chain.spec) + .get_beacon_proposer_indices(epoch, &harness.chain.spec) .unwrap(), state.fork(), ) @@ -147,7 +147,9 @@ async fn missed_blocks_basic() { let mut slot_in_epoch = slot % slots_per_epoch; let mut prev_slot = Slot::new(idx - 1); let mut duplicate_block_root = *_state.block_roots().get(idx as usize).unwrap(); - let mut validator_indexes = _state.get_beacon_proposer_indices(&harness1.spec).unwrap(); + let mut validator_indexes = _state + .get_beacon_proposer_indices(epoch, &harness1.spec) + .unwrap(); let mut missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; let mut proposer_shuffling_decision_root = _state .proposer_shuffling_decision_root(duplicate_block_root) @@ -219,7 +221,9 @@ async fn missed_blocks_basic() { prev_slot = Slot::new(idx - 1); slot_in_epoch = slot % slots_per_epoch; duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); - validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); + validator_indexes = _state2 + .get_beacon_proposer_indices(epoch, &harness2.spec) + .unwrap(); missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; let beacon_proposer_cache = harness2 @@ -317,7 +321,9 @@ async fn missed_blocks_basic() { slot_in_epoch = slot % slots_per_epoch; prev_slot = Slot::new(idx - 1); duplicate_block_root = *_state3.block_roots().get(idx as usize).unwrap(); - validator_indexes = _state3.get_beacon_proposer_indices(&harness3.spec).unwrap(); + validator_indexes = _state3 + .get_beacon_proposer_indices(epoch, &harness3.spec) + .unwrap(); missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; proposer_shuffling_decision_root = _state3 .proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index e864cb1fd9..0f324071a1 100644 --- 
a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -39,7 +39,7 @@ //! task. use crate::work_reprocessing_queue::{ - QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage, + QueuedBackfillBatch, QueuedColumnReconstruction, QueuedGossipBlock, ReprocessQueueMessage, }; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; @@ -47,6 +47,7 @@ use lighthouse_network::{MessageId, NetworkGlobals, PeerId}; use logging::crit; use logging::TimeLatch; use parking_lot::Mutex; +pub use scheduler::work_reprocessing_queue; use serde::{Deserialize, Serialize}; use slot_clock::SlotClock; use std::cmp; @@ -63,7 +64,7 @@ use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; use tracing::{debug, error, trace, warn}; use types::{ - Attestation, BeaconState, ChainSpec, EthSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, + BeaconState, ChainSpec, EthSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, }; use work_reprocessing_queue::{ @@ -73,7 +74,7 @@ use work_reprocessing_queue::{ use work_reprocessing_queue::{IgnoredRpcBlock, QueuedSamplingRequest}; mod metrics; -pub mod work_reprocessing_queue; +pub mod scheduler; /// The maximum size of the channel for work events to the `BeaconProcessor`. 
/// @@ -117,6 +118,7 @@ pub struct BeaconProcessorQueueLengths { rpc_custody_column_queue: usize, rpc_verify_data_column_queue: usize, sampling_result_queue: usize, + column_reconstruction_queue: usize, chain_segment_queue: usize, backfill_chain_segment: usize, gossip_block_queue: usize, @@ -184,6 +186,7 @@ impl BeaconProcessorQueueLengths { rpc_verify_data_column_queue: 1000, unknown_block_sampling_request_queue: 16384, sampling_result_queue: 1000, + column_reconstruction_queue: 64, chain_segment_queue: 64, backfill_chain_segment: 64, gossip_block_queue: 1024, @@ -262,22 +265,16 @@ impl Default for BeaconProcessorConfig { pub struct BeaconProcessorChannels { pub beacon_processor_tx: BeaconProcessorSend, pub beacon_processor_rx: mpsc::Receiver>, - pub work_reprocessing_tx: mpsc::Sender, - pub work_reprocessing_rx: mpsc::Receiver, } impl BeaconProcessorChannels { pub fn new(config: &BeaconProcessorConfig) -> Self { let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(config.max_work_event_queue_len); - let (work_reprocessing_tx, work_reprocessing_rx) = - mpsc::channel(config.max_scheduled_work_queue_len); Self { beacon_processor_tx: BeaconProcessorSend(beacon_processor_tx), beacon_processor_rx, - work_reprocessing_rx, - work_reprocessing_tx, } } } @@ -498,6 +495,12 @@ impl From for WorkEvent { drop_during_sync: false, work: Work::ChainSegmentBackfill(process_fn), }, + ReadyWork::ColumnReconstruction(QueuedColumnReconstruction { process_fn, .. }) => { + Self { + drop_during_sync: true, + work: Work::ColumnReconstruction(process_fn), + } + } } } } @@ -549,32 +552,23 @@ pub enum BlockingOrAsync { Blocking(BlockingFn), Async(AsyncFn), } -pub type GossipAttestationBatch = Vec>>; +pub type GossipAttestationBatch = Vec>; /// Indicates the type of work to be performed and therefore its priority and /// queuing specifics. 
pub enum Work { GossipAttestation { - attestation: Box>>, - process_individual: Box>) + Send + Sync>, - process_batch: Box) + Send + Sync>, - }, - // Attestation requiring conversion before processing. - // - // For now this is a `SingleAttestation`, but eventually we will switch this around so that - // legacy `Attestation`s are converted and the main processing pipeline operates on - // `SingleAttestation`s. - GossipAttestationToConvert { attestation: Box>, process_individual: Box) + Send + Sync>, + process_batch: Box, }, UnknownBlockAttestation { process_fn: BlockingFn, }, GossipAttestationBatch { - attestations: GossipAttestationBatch, - process_batch: Box) + Send + Sync>, + attestations: GossipAttestationBatch, + process_batch: Box, }, GossipAggregate { aggregate: Box>, @@ -619,6 +613,7 @@ pub enum Work { RpcCustodyColumn(AsyncFn), RpcVerifyDataColumn(AsyncFn), SamplingResult(AsyncFn), + ColumnReconstruction(AsyncFn), IgnoredRpcBlock { process_fn: BlockingFn, }, @@ -638,6 +633,7 @@ pub enum Work { LightClientUpdatesByRangeRequest(BlockingFn), ApiRequestP0(BlockingOrAsync), ApiRequestP1(BlockingOrAsync), + Reprocess(ReprocessQueueMessage), } impl fmt::Debug for Work { @@ -674,6 +670,7 @@ pub enum WorkType { RpcCustodyColumn, RpcVerifyDataColumn, SamplingResult, + ColumnReconstruction, IgnoredRpcBlock, ChainSegment, ChainSegmentBackfill, @@ -691,6 +688,7 @@ pub enum WorkType { LightClientUpdatesByRangeRequest, ApiRequestP0, ApiRequestP1, + Reprocess, } impl Work { @@ -702,7 +700,6 @@ impl Work { fn to_type(&self) -> WorkType { match self { Work::GossipAttestation { .. } => WorkType::GossipAttestation, - Work::GossipAttestationToConvert { .. } => WorkType::GossipAttestationToConvert, Work::GossipAttestationBatch { .. } => WorkType::GossipAttestationBatch, Work::GossipAggregate { .. } => WorkType::GossipAggregate, Work::GossipAggregateBatch { .. } => WorkType::GossipAggregateBatch, @@ -725,6 +722,7 @@ impl Work { Work::RpcCustodyColumn { .. 
} => WorkType::RpcCustodyColumn, Work::RpcVerifyDataColumn { .. } => WorkType::RpcVerifyDataColumn, Work::SamplingResult { .. } => WorkType::SamplingResult, + Work::ColumnReconstruction(_) => WorkType::ColumnReconstruction, Work::IgnoredRpcBlock { .. } => WorkType::IgnoredRpcBlock, Work::ChainSegment { .. } => WorkType::ChainSegment, Work::ChainSegmentBackfill(_) => WorkType::ChainSegmentBackfill, @@ -749,6 +747,7 @@ impl Work { } Work::ApiRequestP0 { .. } => WorkType::ApiRequestP0, Work::ApiRequestP1 { .. } => WorkType::ApiRequestP1, + Work::Reprocess { .. } => WorkType::Reprocess, } } } @@ -773,7 +772,7 @@ struct InboundEvents { /// Used by upstream processes to send new work to the `BeaconProcessor`. event_rx: mpsc::Receiver>, /// Used internally for queuing work ready to be re-processed. - reprocess_work_rx: mpsc::Receiver, + ready_work_rx: mpsc::Receiver, } impl Stream for InboundEvents { @@ -794,7 +793,7 @@ impl Stream for InboundEvents { // Poll for delayed blocks before polling for new work. It might be the case that a delayed // block is required to successfully process some new work. 
- match self.reprocess_work_rx.poll_recv(cx) { + match self.ready_work_rx.poll_recv(cx) { Poll::Ready(Some(ready_work)) => { return Poll::Ready(Some(InboundEvent::ReprocessingWork(ready_work.into()))); } @@ -845,8 +844,6 @@ impl BeaconProcessor { pub fn spawn_manager( mut self, event_rx: mpsc::Receiver>, - work_reprocessing_tx: mpsc::Sender, - work_reprocessing_rx: mpsc::Receiver, work_journal_tx: Option>, slot_clock: S, maximum_gossip_clock_disparity: Duration, @@ -891,6 +888,8 @@ impl BeaconProcessor { FifoQueue::new(queue_lengths.rpc_verify_data_column_queue); // TODO(das): the sampling_request_queue is never read let mut sampling_result_queue = FifoQueue::new(queue_lengths.sampling_result_queue); + let mut column_reconstruction_queue = + FifoQueue::new(queue_lengths.column_reconstruction_queue); let mut unknown_block_sampling_request_queue = FifoQueue::new(queue_lengths.unknown_block_sampling_request_queue); let mut chain_segment_queue = FifoQueue::new(queue_lengths.chain_segment_queue); @@ -932,9 +931,13 @@ impl BeaconProcessor { // receive them back once they are ready (`ready_work_rx`). 
let (ready_work_tx, ready_work_rx) = mpsc::channel::(self.config.max_scheduled_work_queue_len); + + let (reprocess_work_tx, reprocess_work_rx) = + mpsc::channel::(self.config.max_scheduled_work_queue_len); + spawn_reprocess_scheduler( ready_work_tx, - work_reprocessing_rx, + reprocess_work_rx, &self.executor, Arc::new(slot_clock), maximum_gossip_clock_disparity, @@ -948,7 +951,7 @@ impl BeaconProcessor { let mut inbound_events = InboundEvents { idle_rx, event_rx, - reprocess_work_rx: ready_work_rx, + ready_work_rx, }; let enable_backfill_rate_limiting = self.config.enable_backfill_rate_limiting; @@ -962,7 +965,7 @@ impl BeaconProcessor { Some(InboundEvent::WorkEvent(event)) if enable_backfill_rate_limiting => { match QueuedBackfillBatch::try_from(event) { Ok(backfill_batch) => { - match work_reprocessing_tx + match reprocess_work_tx .try_send(ReprocessQueueMessage::BackfillSync(backfill_batch)) { Err(e) => { @@ -1024,8 +1027,10 @@ impl BeaconProcessor { .unwrap_or(WORKER_FREED); // We don't care if this message was successfully sent, we only use the journal - // during testing. - let _ = work_journal_tx.try_send(id); + // during testing. We also ignore reprocess messages to ensure our test cases can pass. + if id != "reprocess" { + let _ = work_journal_tx.try_send(id); + } } let can_spawn = self.current_workers < self.config.max_workers; @@ -1072,6 +1077,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = gossip_data_column_queue.pop() { Some(item) + } else if let Some(item) = column_reconstruction_queue.pop() { + Some(item) // Check the priority 0 API requests after blocks and blobs, but before attestations. 
} else if let Some(item) = api_request_p0_queue.pop() { Some(item) @@ -1313,11 +1320,16 @@ impl BeaconProcessor { let work_type = work.to_type(); match work { + Work::Reprocess(work_event) => { + if let Err(e) = reprocess_work_tx.try_send(work_event) { + error!( + error = ?e, + "Failed to reprocess work event" + ) + } + } _ if can_spawn => self.spawn_worker(work, idle_tx), Work::GossipAttestation { .. } => attestation_queue.push(work), - Work::GossipAttestationToConvert { .. } => { - attestation_to_convert_queue.push(work) - } // Attestation batches are formed internally within the // `BeaconProcessor`, they are not sent from external services. Work::GossipAttestationBatch { .. } => crit!( @@ -1371,6 +1383,9 @@ impl BeaconProcessor { rpc_verify_data_column_queue.push(work, work_id) } Work::SamplingResult(_) => sampling_result_queue.push(work, work_id), + Work::ColumnReconstruction(_) => { + column_reconstruction_queue.push(work, work_id) + } Work::ChainSegment { .. } => chain_segment_queue.push(work, work_id), Work::ChainSegmentBackfill { .. 
} => { backfill_chain_segment.push(work, work_id) @@ -1460,6 +1475,7 @@ impl BeaconProcessor { WorkType::RpcCustodyColumn => rpc_custody_column_queue.len(), WorkType::RpcVerifyDataColumn => rpc_verify_data_column_queue.len(), WorkType::SamplingResult => sampling_result_queue.len(), + WorkType::ColumnReconstruction => column_reconstruction_queue.len(), WorkType::ChainSegment => chain_segment_queue.len(), WorkType::ChainSegmentBackfill => backfill_chain_segment.len(), WorkType::Status => status_queue.len(), @@ -1482,6 +1498,7 @@ impl BeaconProcessor { WorkType::LightClientUpdatesByRangeRequest => lc_update_range_queue.len(), WorkType::ApiRequestP0 => api_request_p0_queue.len(), WorkType::ApiRequestP1 => api_request_p1_queue.len(), + WorkType::Reprocess => 0, }; metrics::observe_vec( &metrics::BEACON_PROCESSOR_QUEUE_LENGTH, @@ -1559,12 +1576,6 @@ impl BeaconProcessor { } => task_spawner.spawn_blocking(move || { process_individual(*attestation); }), - Work::GossipAttestationToConvert { - attestation, - process_individual, - } => task_spawner.spawn_blocking(move || { - process_individual(*attestation); - }), Work::GossipAttestationBatch { attestations, process_batch, @@ -1602,7 +1613,8 @@ impl BeaconProcessor { | Work::RpcBlobs { process_fn } | Work::RpcCustodyColumn(process_fn) | Work::RpcVerifyDataColumn(process_fn) - | Work::SamplingResult(process_fn) => task_spawner.spawn_async(process_fn), + | Work::SamplingResult(process_fn) + | Work::ColumnReconstruction(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) @@ -1638,6 +1650,7 @@ impl BeaconProcessor { | Work::LightClientUpdatesByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } + Work::Reprocess(_) => {} }; } } diff --git a/beacon_node/beacon_processor/src/scheduler/mod.rs b/beacon_node/beacon_processor/src/scheduler/mod.rs new file mode 100644 index 
0000000000..e1a076a7c5 --- /dev/null +++ b/beacon_node/beacon_processor/src/scheduler/mod.rs @@ -0,0 +1 @@ +pub mod work_reprocessing_queue; diff --git a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs similarity index 94% rename from beacon_node/beacon_processor/src/work_reprocessing_queue.rs rename to beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs index 2b6e72ae0c..855342d8bd 100644 --- a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs @@ -19,6 +19,7 @@ use itertools::Itertools; use logging::crit; use logging::TimeLatch; use slot_clock::SlotClock; +use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::future::Future; use std::pin::Pin; @@ -54,6 +55,9 @@ pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(4); /// For how long to queue sampling requests for reprocessing. pub const QUEUED_SAMPLING_REQUESTS_DELAY: Duration = Duration::from_secs(12); +/// For how long to queue delayed column reconstruction. +pub const QUEUED_RECONSTRUCTION_DELAY: Duration = Duration::from_millis(150); + /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. The fact that /// we signature-verify blocks before putting them in the queue *should* protect against this, but /// it's nice to have extra protection. @@ -109,6 +113,8 @@ pub enum ReprocessQueueMessage { UnknownBlockSamplingRequest(QueuedSamplingRequest), /// A new backfill batch that needs to be scheduled for processing. BackfillSync(QueuedBackfillBatch), + /// A delayed column reconstruction that needs checking + DelayColumnReconstruction(QueuedColumnReconstruction), } /// Events sent by the scheduler once they are ready for re-processing. 
@@ -121,6 +127,7 @@ pub enum ReadyWork { LightClientUpdate(QueuedLightClientUpdate), SamplingRequest(QueuedSamplingRequest), BackfillSync(QueuedBackfillBatch), + ColumnReconstruction(QueuedColumnReconstruction), } /// An Attestation for which the corresponding block was not seen while processing, queued for @@ -176,6 +183,11 @@ pub struct IgnoredRpcBlock { /// A backfill batch work that has been queued for processing later. pub struct QueuedBackfillBatch(pub AsyncFn); +pub struct QueuedColumnReconstruction { + pub block_root: Hash256, + pub process_fn: AsyncFn, +} + impl TryFrom> for QueuedBackfillBatch { type Error = WorkEvent; @@ -212,6 +224,8 @@ enum InboundEvent { ReadyLightClientUpdate(QueuedLightClientUpdateId), /// A backfill batch that was queued is ready for processing. ReadyBackfillSync(QueuedBackfillBatch), + /// A column reconstruction that was queued is ready for processing. + ReadyColumnReconstruction(QueuedColumnReconstruction), /// A message sent to the `ReprocessQueue` Msg(ReprocessQueueMessage), } @@ -234,6 +248,8 @@ struct ReprocessQueue { lc_updates_delay_queue: DelayQueue, /// Queue to manage scheduled sampling requests sampling_requests_delay_queue: DelayQueue, + /// Queue to manage scheduled column reconstructions. + column_reconstructions_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. @@ -252,6 +268,8 @@ struct ReprocessQueue { queued_sampling_requests: FnvHashMap, /// Sampling requests per block root. awaiting_sampling_requests_per_block_root: HashMap>, + /// Column reconstruction per block root. 
+ queued_column_reconstructions: HashMap, /// Queued backfill batches queued_backfill_batches: Vec, @@ -343,6 +361,15 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } + match self.column_reconstructions_delay_queue.poll_expired(cx) { + Poll::Ready(Some(reconstruction)) => { + return Poll::Ready(Some(InboundEvent::ReadyColumnReconstruction( + reconstruction.into_inner(), + ))); + } + Poll::Ready(None) | Poll::Pending => (), + } + if let Some(next_backfill_batch_event) = self.next_backfill_batch_event.as_mut() { match next_backfill_batch_event.as_mut().poll(cx) { Poll::Ready(_) => { @@ -410,6 +437,7 @@ impl ReprocessQueue { attestations_delay_queue: DelayQueue::new(), lc_updates_delay_queue: DelayQueue::new(), sampling_requests_delay_queue: <_>::default(), + column_reconstructions_delay_queue: DelayQueue::new(), queued_gossip_block_roots: HashSet::new(), queued_lc_updates: FnvHashMap::default(), queued_aggregates: FnvHashMap::default(), @@ -419,6 +447,7 @@ impl ReprocessQueue { awaiting_lc_updates_per_parent_root: HashMap::new(), awaiting_sampling_requests_per_block_root: <_>::default(), queued_backfill_batches: Vec::new(), + queued_column_reconstructions: HashMap::new(), next_attestation: 0, next_lc_update: 0, next_sampling_request_update: 0, @@ -817,6 +846,21 @@ impl ReprocessQueue { self.recompute_next_backfill_batch_event(); } } + InboundEvent::Msg(DelayColumnReconstruction(request)) => { + match self.queued_column_reconstructions.entry(request.block_root) { + Entry::Occupied(key) => { + // Push back the reattempted reconstruction + self.column_reconstructions_delay_queue + .reset(key.get(), QUEUED_RECONSTRUCTION_DELAY) + } + Entry::Vacant(vacant) => { + let delay_key = self + .column_reconstructions_delay_queue + .insert(request, QUEUED_RECONSTRUCTION_DELAY); + vacant.insert(delay_key); + } + } + } // A block that was queued for later processing is now ready to be processed. 
InboundEvent::ReadyGossipBlock(ready_block) => { let block_root = ready_block.beacon_block_root; @@ -940,6 +984,20 @@ impl ReprocessQueue { _ => crit!("Unexpected return from try_send error"), } } + InboundEvent::ReadyColumnReconstruction(column_reconstruction) => { + self.queued_column_reconstructions + .remove(&column_reconstruction.block_root); + if self + .ready_work_tx + .try_send(ReadyWork::ColumnReconstruction(column_reconstruction)) + .is_err() + { + error!( + hint = "system may be overloaded", + "Ignored scheduled column reconstruction" + ); + } + } } metrics::set_gauge_vec( diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 379b46b4b1..3c4b2572c9 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -10,7 +10,6 @@ beacon_processor = { workspace = true } directory = { workspace = true } dirs = { workspace = true } environment = { workspace = true } -eth1 = { workspace = true } eth2 = { workspace = true } eth2_config = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index a581d5c128..479b4b3192 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -11,17 +11,15 @@ use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, - eth1_chain::{CachingEth1Backend, Eth1Chain}, slot_clock::{SlotClock, SystemTimeSlotClock}, state_advance_timer::spawn_state_advance_timer, store::{HotColdDB, ItemStore, StoreConfig}, - BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, + BeaconChain, BeaconChainTypes, MigratorConfig, ServerSentEventHandler, }; use beacon_chain::{Kzg, LightClientProducerEvent}; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; 
use environment::RuntimeContext; -use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth2::{ types::{BlockId, StateId}, BeaconNodeHttpClient, Error as ApiError, Timeouts, @@ -29,7 +27,7 @@ use eth2::{ use execution_layer::test_utils::generate_genesis_header; use execution_layer::ExecutionLayer; use futures::channel::mpsc::Receiver; -use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH}; +use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; @@ -37,14 +35,12 @@ use rand::rngs::{OsRng, StdRng}; use rand::SeedableRng; use slasher::Slasher; use slasher_service::SlasherService; -use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use store::database::interface::BeaconNodeBackend; use timer::spawn_timer; -use tokio::sync::oneshot; use tracing::{debug, info, warn}; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, BlobSidecarList, ChainSpec, EthSpec, @@ -80,7 +76,6 @@ pub struct ClientBuilder { chain_spec: Option>, beacon_chain_builder: Option>, beacon_chain: Option>>, - eth1_service: Option, network_globals: Option>>, network_senders: Option>, libp2p_registry: Option, @@ -95,11 +90,10 @@ pub struct ClientBuilder { eth_spec_instance: T::EthSpec, } -impl - ClientBuilder> +impl + ClientBuilder> where TSlotClock: SlotClock + Clone + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, @@ -115,7 +109,6 @@ where chain_spec: None, beacon_chain_builder: None, beacon_chain: None, - eth1_service: None, network_globals: None, network_senders: None, libp2p_registry: None, @@ -261,7 +254,7 @@ where client_genesis }; - let 
(beacon_chain_builder, eth1_service_option) = match client_genesis { + let beacon_chain_builder = match client_genesis { ClientGenesis::Interop { validator_count, genesis_time, @@ -274,7 +267,7 @@ where None, &spec, )?; - builder.genesis_state(genesis_state).map(|v| (v, None))? + builder.genesis_state(genesis_state)? } ClientGenesis::InteropMerge { validator_count, @@ -289,7 +282,7 @@ where execution_payload_header, &spec, )?; - builder.genesis_state(genesis_state).map(|v| (v, None))? + builder.genesis_state(genesis_state)? } ClientGenesis::GenesisState => { info!("Starting from known genesis state"); @@ -337,7 +330,7 @@ where } } - builder.genesis_state(genesis_state).map(|v| (v, None))? + builder.genesis_state(genesis_state)? } ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, @@ -366,14 +359,12 @@ where }; let genesis_state = genesis_state(&runtime_context, &config).await?; - builder - .weak_subjectivity_state( - anchor_state, - anchor_block, - anchor_blobs, - genesis_state, - ) - .map(|v| (v, None))? + builder.weak_subjectivity_state( + anchor_state, + anchor_block, + anchor_blobs, + genesis_state, + )? } ClientGenesis::CheckpointSyncUrl { url } => { info!( @@ -391,47 +382,6 @@ where )), ); - let deposit_snapshot = if config.sync_eth1_chain { - // We want to fetch deposit snapshot before fetching the finalized beacon state to - // ensure that the snapshot is not newer than the beacon state that satisfies the - // deposit finalization conditions - debug!("Downloading deposit snapshot"); - let deposit_snapshot_result = remote - .get_deposit_snapshot() - .await - .map_err(|e| match e { - ApiError::InvalidSsz(e) => format!( - "Unable to parse SSZ: {:?}. 
Ensure the checkpoint-sync-url refers to a \ - node for the correct network", - e - ), - e => format!("Error fetching deposit snapshot from remote: {:?}", e), - }); - match deposit_snapshot_result { - Ok(Some(deposit_snapshot)) => { - if deposit_snapshot.is_valid() { - Some(deposit_snapshot) - } else { - warn!("Remote BN sent invalid deposit snapshot!"); - None - } - } - Ok(None) => { - warn!("Remote BN does not support EIP-4881 fast deposit sync"); - None - } - Err(e) => { - warn!( - error = e, - "Remote BN does not support EIP-4881 fast deposit sync" - ); - None - } - } - } else { - None - }; - debug!("Downloading finalized state"); let state = remote .get_debug_beacon_states_ssz::(StateId::Finalized, &spec) @@ -491,120 +441,14 @@ where "Loaded checkpoint block and state" ); - let service = - deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot( - config.eth1, - spec.clone(), - &snapshot, - ) { - Ok(service) => { - info!( - deposits_loaded = snapshot.deposit_count, - "Loaded deposit tree snapshot" - ); - Some(service) - } - Err(e) => { - warn!(error = ?e, - "Unable to load deposit snapshot" - ); - None - } - }); - - builder - .weak_subjectivity_state(state, block, blobs, genesis_state) - .map(|v| (v, service))? + builder.weak_subjectivity_state(state, block, blobs, genesis_state)? } ClientGenesis::DepositContract => { - info!( - eth1_endpoints = ?config.eth1.endpoint, - contract_deploy_block = config.eth1.deposit_contract_deploy_block, - deposit_contract = &config.eth1.deposit_contract_address, - "Waiting for eth2 genesis from eth1" - ); - - let genesis_service = - Eth1GenesisService::new(config.eth1, context.eth2_config().spec.clone())?; - - // If the HTTP API server is enabled, start an instance of it where it only - // contains a reference to the eth1 service (all non-eth1 endpoints will fail - // gracefully). 
- // - // Later in this function we will shutdown this temporary "waiting for genesis" - // server so the real one can be started later. - let (exit_tx, exit_rx) = oneshot::channel::<()>(); - let http_listen_opt = if self.http_api_config.enabled { - #[allow(clippy::type_complexity)] - let ctx: Arc< - http_api::Context< - Witness, - >, - > = Arc::new(http_api::Context { - config: self.http_api_config.clone(), - chain: None, - network_senders: None, - network_globals: None, - beacon_processor_send: None, - beacon_processor_reprocess_send: None, - eth1_service: Some(genesis_service.eth1_service.clone()), - sse_logging_components: runtime_context.sse_logging_components.clone(), - }); - - // Discard the error from the oneshot. - let exit_future = async { - let _ = exit_rx.await; - }; - - let (listen_addr, server) = http_api::serve(ctx, exit_future) - .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; - - let http_api_task = async move { - server.await; - debug!("HTTP API server task ended"); - }; - - context - .clone() - .executor - .spawn_without_exit(http_api_task, "http-api"); - - Some(listen_addr) - } else { - None - }; - - let genesis_state = genesis_service - .wait_for_genesis_state(Duration::from_millis( - ETH1_GENESIS_UPDATE_INTERVAL_MILLIS, - )) - .await?; - - let _ = exit_tx.send(()); - - if let Some(http_listen) = http_listen_opt { - // This is a bit of a hack to ensure that the HTTP server has indeed shutdown. - // - // We will restart it again after we've finished setting up for genesis. - while TcpListener::bind(http_listen).is_err() { - warn!( - port = %http_listen, - "Waiting for HTTP server port to open" - ); - tokio::time::sleep(Duration::from_secs(1)).await; - } - } - - builder - .genesis_state(genesis_state) - .map(|v| (v, Some(genesis_service.into_core_service())))? 
+ return Err("Loading genesis from deposit contract no longer supported".to_string()) } - ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?, + ClientGenesis::FromStore => builder.resume_from_db()?, }; - if config.sync_eth1_chain { - self.eth1_service = eth1_service_option; - } self.beacon_chain_builder = Some(beacon_chain_builder); Ok(self) } @@ -638,7 +482,6 @@ where context.executor, libp2p_registry.as_mut(), beacon_processor_channels.beacon_processor_tx.clone(), - beacon_processor_channels.work_reprocessing_tx.clone(), ) .await .map_err(|e| format!("Failed to start network: {:?}", e))?; @@ -755,7 +598,7 @@ where #[allow(clippy::type_complexity)] pub fn build( mut self, - ) -> Result>, String> { + ) -> Result>, String> { let runtime_context = self .runtime_context .as_ref() @@ -775,11 +618,7 @@ where chain: self.beacon_chain.clone(), network_senders: self.network_senders.clone(), network_globals: self.network_globals.clone(), - eth1_service: self.eth1_service.clone(), beacon_processor_send: Some(beacon_processor_channels.beacon_processor_tx.clone()), - beacon_processor_reprocess_send: Some( - beacon_processor_channels.work_reprocessing_tx.clone(), - ), sse_logging_components: runtime_context.sse_logging_components.clone(), }); @@ -843,8 +682,6 @@ where } .spawn_manager( beacon_processor_channels.beacon_processor_rx, - beacon_processor_channels.work_reprocessing_tx.clone(), - beacon_processor_channels.work_reprocessing_rx, None, beacon_chain.slot_clock.clone(), beacon_chain.spec.maximum_gossip_clock_disparity(), @@ -918,7 +755,7 @@ where compute_light_client_updates( &inner_chain, light_client_server_rv, - beacon_processor_channels.work_reprocessing_tx, + beacon_processor_channels.beacon_processor_tx, ) .await }, @@ -950,11 +787,10 @@ where } } -impl - ClientBuilder> +impl + ClientBuilder> where TSlotClock: SlotClock + Clone + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, THotStore: ItemStore + 'static, 
TColdStore: ItemStore + 'static, @@ -987,11 +823,10 @@ where } } -impl - ClientBuilder, BeaconNodeBackend>> +impl + ClientBuilder, BeaconNodeBackend>> where TSlotClock: SlotClock + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, { /// Specifies that the `Client` should use a `HotColdDB` database. @@ -1002,11 +837,6 @@ where blobs_path: &Path, config: StoreConfig, ) -> Result { - let context = self - .runtime_context - .as_ref() - .ok_or("disk_store requires a log")? - .service_context("freezer_db".into()); let spec = self .chain_spec .clone() @@ -1015,22 +845,8 @@ where self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); - // Optionally grab the genesis state root. - // This will only be required if a DB upgrade to V22 is needed. - let genesis_state_root = context - .eth2_network_config - .as_ref() - .and_then(|config| config.genesis_state_root::().transpose()) - .transpose()?; - - let schema_upgrade = |db, from, to| { - migrate_schema::>( - db, - genesis_state_root, - from, - to, - ) - }; + let schema_upgrade = + |db, from, to| migrate_schema::>(db, from, to); let store = HotColdDB::open( hot_path, @@ -1046,102 +862,8 @@ where } } -impl - ClientBuilder, E, THotStore, TColdStore>> +impl ClientBuilder> where - TSlotClock: SlotClock + 'static, - E: EthSpec + 'static, - THotStore: ItemStore + 'static, - TColdStore: ItemStore + 'static, -{ - /// Specifies that the `BeaconChain` should cache eth1 blocks/logs from a remote eth1 node - /// (e.g., Parity/Geth) and refer to that cache when collecting deposits or eth1 votes during - /// block production. - pub async fn caching_eth1_backend(mut self, config: Eth1Config) -> Result { - let context = self - .runtime_context - .as_ref() - .ok_or("caching_eth1_backend requires a runtime_context")? 
- .service_context("deposit_contract_rpc".into()); - let beacon_chain_builder = self - .beacon_chain_builder - .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; - let spec = self - .chain_spec - .clone() - .ok_or("caching_eth1_backend requires a chain spec")?; - - let backend = if let Some(eth1_service_from_genesis) = self.eth1_service { - eth1_service_from_genesis.update_config(config)?; - - // This cache is not useful because it's first (earliest) block likely the block that - // triggered genesis. - // - // In order to vote we need to be able to go back at least 2 * `ETH1_FOLLOW_DISTANCE` - // from the genesis-triggering block. Presently the block cache does not support - // importing blocks with decreasing block numbers, it only accepts them in increasing - // order. If this turns out to be a bottleneck we can update the block cache to allow - // adding earlier blocks too. - eth1_service_from_genesis.drop_block_cache(); - - CachingEth1Backend::from_service(eth1_service_from_genesis) - } else if config.purge_cache { - CachingEth1Backend::new(config, spec)? - } else { - beacon_chain_builder - .get_persisted_eth1_backend()? - .map(|persisted| { - Eth1Chain::from_ssz_container(&persisted, config.clone(), spec.clone()) - .map(|chain| chain.into_backend()) - }) - .unwrap_or_else(|| CachingEth1Backend::new(config, spec.clone()))? - }; - - self.eth1_service = Some(backend.core.clone()); - - // Starts the service that connects to an eth1 node and periodically updates caches. - backend.start(context.executor); - - self.beacon_chain_builder = Some(beacon_chain_builder.eth1_backend(Some(backend))); - - Ok(self) - } - - /// Do not use any eth1 backend. The client will not be able to produce beacon blocks. 
- pub fn no_eth1_backend(mut self) -> Result { - let beacon_chain_builder = self - .beacon_chain_builder - .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; - - self.beacon_chain_builder = Some(beacon_chain_builder.no_eth1_backend()); - - Ok(self) - } - - /// Use an eth1 backend that can produce blocks but is not connected to an Eth1 node. - /// - /// This backend will never produce deposits so it's impossible to add validators after - /// genesis. The `Eth1Data` votes will be deterministic junk data. - /// - /// ## Notes - /// - /// The client is given the `CachingEth1Backend` type, but the http backend is never started and the - /// caches are never used. - pub fn dummy_eth1_backend(mut self) -> Result { - let beacon_chain_builder = self - .beacon_chain_builder - .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; - - self.beacon_chain_builder = Some(beacon_chain_builder.dummy_eth1_backend()?); - - Ok(self) - } -} - -impl - ClientBuilder> -where - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, diff --git a/beacon_node/client/src/compute_light_client_updates.rs b/beacon_node/client/src/compute_light_client_updates.rs index fab284c428..75fa22e795 100644 --- a/beacon_node/client/src/compute_light_client_updates.rs +++ b/beacon_node/client/src/compute_light_client_updates.rs @@ -1,8 +1,8 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, LightClientProducerEvent}; use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; +use beacon_processor::{BeaconProcessorSend, Work, WorkEvent}; use futures::channel::mpsc::Receiver; use futures::StreamExt; -use tokio::sync::mpsc::Sender; use tracing::error; // Each `LightClientProducerEvent` is ~200 bytes. 
With the light_client server producing only recent @@ -14,7 +14,7 @@ pub(crate) const LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY: usize = 32; pub async fn compute_light_client_updates( chain: &BeaconChain, mut light_client_server_rv: Receiver>, - reprocess_tx: Sender, + beacon_processor_send: BeaconProcessorSend, ) { // Should only receive events for recent blocks, import_block filters by blocks close to clock. // @@ -31,7 +31,13 @@ pub async fn compute_light_client_updates( }); let msg = ReprocessQueueMessage::NewLightClientOptimisticUpdate { parent_root }; - if reprocess_tx.try_send(msg).is_err() { + if beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: true, + work: Work::Reprocess(msg), + }) + .is_err() + { error!(%parent_root,"Failed to inform light client update") }; } diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index becc781ed3..495df7d5f7 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -59,7 +59,6 @@ pub struct Config { /// Path where the blobs database will be located if blobs should be in a separate database. pub blobs_db_path: Option, pub log_file: PathBuf, - pub sync_eth1_chain: bool, /// Graffiti to be inserted everytime we create a block if the validator doesn't specify. 
pub beacon_graffiti: GraffitiOrigin, pub validator_monitor: ValidatorMonitorConfig, @@ -70,7 +69,6 @@ pub struct Config { pub store: store::StoreConfig, pub network: network::NetworkConfig, pub chain: beacon_chain::ChainConfig, - pub eth1: eth1::Config, pub execution_layer: Option, pub trusted_setup: TrustedSetup, pub http_api: http_api::Config, @@ -99,8 +97,6 @@ impl Default for Config { store: <_>::default(), network: NetworkConfig::default(), chain: <_>::default(), - sync_eth1_chain: true, - eth1: <_>::default(), execution_layer: None, trusted_setup, beacon_graffiti: GraffitiOrigin::default(), diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 0b6550c208..916dae6db0 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -10,7 +10,7 @@ use lighthouse_network::{Enr, Multiaddr, NetworkGlobals}; use std::net::SocketAddr; use std::sync::Arc; -pub use beacon_chain::{BeaconChainTypes, Eth1ChainBackend}; +pub use beacon_chain::BeaconChainTypes; pub use builder::ClientBuilder; pub use config::{ClientGenesis, Config as ClientConfig}; pub use eth2_config::Eth2Config; diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 53c9c85c00..ea9fbe2894 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -60,7 +60,6 @@ pub fn spawn_notifier( wait_time = estimated_time_pretty(Some(next_slot.as_secs() as f64)), "Waiting for genesis" ); - eth1_logging(&beacon_chain); bellatrix_readiness_logging(Slot::new(0), &beacon_chain).await; capella_readiness_logging(Slot::new(0), &beacon_chain).await; genesis_execution_payload_logging(&beacon_chain).await; @@ -309,7 +308,6 @@ pub fn spawn_notifier( ); } - eth1_logging(&beacon_chain); bellatrix_readiness_logging(current_slot, &beacon_chain).await; capella_readiness_logging(current_slot, &beacon_chain).await; deneb_readiness_logging(current_slot, &beacon_chain).await; @@ -677,53 +675,6 @@ async fn 
genesis_execution_payload_logging(beacon_chain: &B } } -fn eth1_logging(beacon_chain: &BeaconChain) { - let current_slot_opt = beacon_chain.slot().ok(); - - // Perform some logging about the eth1 chain - if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { - // No need to do logging if using the dummy backend. - if eth1_chain.is_dummy_backend() { - return; - } - - if let Some(status) = eth1_chain.sync_status( - beacon_chain.genesis_time, - current_slot_opt, - &beacon_chain.spec, - ) { - debug!( - eth1_head_block = status.head_block_number, - latest_cached_block_number = status.latest_cached_block_number, - latest_cached_timestamp = status.latest_cached_block_timestamp, - voting_target_timestamp = status.voting_target_timestamp, - ready = status.lighthouse_is_cached_and_ready, - "Eth1 cache sync status" - ); - - if !status.lighthouse_is_cached_and_ready { - let voting_target_timestamp = status.voting_target_timestamp; - - let distance = status - .latest_cached_block_timestamp - .map(|latest| { - voting_target_timestamp.saturating_sub(latest) - / beacon_chain.spec.seconds_per_eth1_block - }) - .map(|distance| distance.to_string()) - .unwrap_or_else(|| "initializing deposits".to_string()); - - warn!( - est_blocks_remaining = distance, - "Syncing deposit contract block cache" - ); - } - } else { - error!("Unable to determine deposit contract sync status"); - } - } -} - /// Returns the peer count, returning something helpful if it's `usize::MAX` (effectively a /// `None` value). 
fn peer_count_pretty(peer_count: usize) -> String { diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml deleted file mode 100644 index f834ad7eef..0000000000 --- a/beacon_node/eth1/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "eth1" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -[dependencies] -eth2 = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } -execution_layer = { workspace = true } -futures = { workspace = true } -logging = { workspace = true } -merkle_proof = { workspace = true } -metrics = { workspace = true } -parking_lot = { workspace = true } -sensitive_url = { workspace = true } -serde = { workspace = true } -state_processing = { workspace = true } -superstruct = { workspace = true } -task_executor = { workspace = true } -tokio = { workspace = true } -tracing = { workspace = true } -tree_hash = { workspace = true } -types = { workspace = true } - -[dev-dependencies] -environment = { workspace = true } -eth1_test_rig = { workspace = true } -serde_yaml = { workspace = true } diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs deleted file mode 100644 index 9c840aea21..0000000000 --- a/beacon_node/eth1/src/block_cache.rs +++ /dev/null @@ -1,303 +0,0 @@ -use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; -use std::ops::RangeInclusive; - -pub use eth2::lighthouse::Eth1Block; -use eth2::types::Hash256; -use std::sync::Arc; - -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - /// The timestamp of each block equal to or later than the block prior to it. - InconsistentTimestamp { parent: u64, child: u64 }, - /// Some `Eth1Block` was provided with the same block number but different data. The source - /// of eth1 data is inconsistent. - Conflicting(u64), - /// The given block was not one block number higher than the highest known block number. 
- NonConsecutive { given: u64, expected: u64 }, - /// Some invariant was violated, there is a likely bug in the code. - Internal(String), -} - -/// Stores block and deposit contract information and provides queries based upon the block -/// timestamp. -#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -pub struct BlockCache { - blocks: Vec>, - #[ssz(skip_serializing, skip_deserializing)] - by_hash: HashMap>, -} - -impl BlockCache { - /// Returns the number of blocks stored in `self`. - pub fn len(&self) -> usize { - self.blocks.len() - } - - /// True if the cache does not store any blocks. - pub fn is_empty(&self) -> bool { - self.blocks.is_empty() - } - - /// Returns the earliest (lowest timestamp) block, if any. - pub fn earliest_block(&self) -> Option<&Eth1Block> { - self.blocks.first().map(|ptr| ptr.as_ref()) - } - - /// Returns the latest (highest timestamp) block, if any. - pub fn latest_block(&self) -> Option<&Eth1Block> { - self.blocks.last().map(|ptr| ptr.as_ref()) - } - - /// Returns the timestamp of the earliest block in the cache (if any). - pub fn earliest_block_timestamp(&self) -> Option { - self.blocks.first().map(|block| block.timestamp) - } - - /// Returns the timestamp of the latest block in the cache (if any). - pub fn latest_block_timestamp(&self) -> Option { - self.blocks.last().map(|block| block.timestamp) - } - - /// Returns the lowest block number stored. - pub fn lowest_block_number(&self) -> Option { - self.blocks.first().map(|block| block.number) - } - - /// Returns the highest block number stored. - pub fn highest_block_number(&self) -> Option { - self.blocks.last().map(|block| block.number) - } - - /// Returns an iterator over all blocks. - /// - /// Blocks a guaranteed to be returned with; - /// - /// - Monotonically increasing block numbers. - /// - Non-uniformly increasing block timestamps. 
- pub fn iter(&self) -> impl DoubleEndedIterator + Clone { - self.blocks.iter().map(|ptr| ptr.as_ref()) - } - - /// Shortens the cache, keeping the latest (by block number) `len` blocks while dropping the - /// rest. - /// - /// If `len` is greater than the vector's current length, this has no effect. - pub fn truncate(&mut self, len: usize) { - if len < self.blocks.len() { - let remaining = self.blocks.split_off(self.blocks.len() - len); - for block in &self.blocks { - self.by_hash.remove(&block.hash); - } - self.blocks = remaining; - } - } - - /// Returns the range of block numbers stored in the block cache. All blocks in this range can - /// be accessed. - fn available_block_numbers(&self) -> Option> { - Some(self.blocks.first()?.number..=self.blocks.last()?.number) - } - - /// Returns a block with the corresponding number, if any. - pub fn block_by_number(&self, block_number: u64) -> Option<&Eth1Block> { - self.blocks - .get( - self.blocks - .as_slice() - .binary_search_by(|block| block.number.cmp(&block_number)) - .ok()?, - ) - .map(|ptr| ptr.as_ref()) - } - - /// Returns a block with the corresponding hash, if any. - pub fn block_by_hash(&self, block_hash: &Hash256) -> Option<&Eth1Block> { - self.by_hash.get(block_hash).map(|ptr| ptr.as_ref()) - } - - /// Rebuilds the by_hash map - pub fn rebuild_by_hash_map(&mut self) { - self.by_hash.clear(); - for block in self.blocks.iter() { - self.by_hash.insert(block.hash, block.clone()); - } - } - - /// Insert an `Eth1Snapshot` into `self`, allowing future queries. - /// - /// Allows inserting either: - /// - /// - The root block (i.e., any block if there are no existing blocks), or, - /// - An immediate child of the most recent (highest block number) block. - /// - /// ## Errors - /// - /// - If the cache is not empty and `item.block.block_number - 1` is not already in `self`. - /// - If `item.block.block_number` is in `self`, but is not identical to the supplied - /// `Eth1Snapshot`. 
- /// - If `item.block.timestamp` is prior to the parent. - pub fn insert_root_or_child(&mut self, block: Eth1Block) -> Result<(), Error> { - let expected_block_number = self - .highest_block_number() - .map(|n| n + 1) - .unwrap_or_else(|| block.number); - - // If there are already some cached blocks, check to see if the new block number is one of - // them. - // - // If the block is already known, check to see the given block is identical to it. If not, - // raise an inconsistency error. This is mostly likely caused by some fork on the eth1 - // chain. - if let Some(local) = self.available_block_numbers() { - if local.contains(&block.number) { - let known_block = self.block_by_number(block.number).ok_or_else(|| { - Error::Internal("An expected block was not present".to_string()) - })?; - - if known_block == &block { - return Ok(()); - } else { - return Err(Error::Conflicting(block.number)); - }; - } - } - - // Only permit blocks when it's either: - // - // - The first block inserted. - // - Exactly one block number higher than the highest known block number. - if block.number != expected_block_number { - return Err(Error::NonConsecutive { - given: block.number, - expected: expected_block_number, - }); - } - - // If the block is not the first block inserted, ensure that its timestamp is not higher - // than its parents. 
- if let Some(previous_block) = self.blocks.last() { - if previous_block.timestamp > block.timestamp { - return Err(Error::InconsistentTimestamp { - parent: previous_block.timestamp, - child: block.timestamp, - }); - } - } - - let ptr = Arc::new(block); - self.by_hash.insert(ptr.hash, ptr.clone()); - self.blocks.push(ptr); - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use types::FixedBytesExtended; - - use super::*; - - fn get_block(i: u64, interval_secs: u64) -> Eth1Block { - Eth1Block { - hash: Hash256::from_low_u64_be(i), - timestamp: i * interval_secs, - number: i, - deposit_root: Some(Hash256::from_low_u64_be(i << 32)), - deposit_count: Some(i), - } - } - - fn get_blocks(n: usize, interval_secs: u64) -> Vec { - (0..n as u64).map(|i| get_block(i, interval_secs)).collect() - } - - fn insert(cache: &mut BlockCache, s: Eth1Block) -> Result<(), Error> { - cache.insert_root_or_child(s) - } - - #[test] - fn truncate() { - let n = 16; - let blocks = get_blocks(n, 10); - - let mut cache = BlockCache::default(); - - for block in blocks { - insert(&mut cache, block.clone()).expect("should add consecutive blocks"); - } - - for len in &[0, 1, 2, 3, 4, 8, 15, 16] { - let mut cache = cache.clone(); - - cache.truncate(*len); - - assert_eq!( - cache.blocks.len(), - *len, - "should truncate to length: {}", - *len - ); - } - - let mut cache_2 = cache; - cache_2.truncate(17); - assert_eq!( - cache_2.blocks.len(), - n, - "truncate to larger than n should be a no-op" - ); - } - - #[test] - fn inserts() { - let n = 16; - let blocks = get_blocks(n, 10); - - let mut cache = BlockCache::default(); - - for block in blocks { - insert(&mut cache, block.clone()).expect("should add consecutive blocks"); - } - - // No error for re-adding a block identical to one that exists. - assert!(insert(&mut cache, get_block(n as u64 - 1, 10)).is_ok()); - - // Error for re-adding a block that is different to the one that exists. 
- assert!(insert(&mut cache, get_block(n as u64 - 1, 11)).is_err()); - - // Error for adding non-consecutive blocks. - assert!(insert(&mut cache, get_block(n as u64 + 1, 10)).is_err()); - assert!(insert(&mut cache, get_block(n as u64 + 2, 10)).is_err()); - - // Error for adding timestamp prior to previous. - assert!(insert(&mut cache, get_block(n as u64, 1)).is_err()); - // Double check to make sure previous test was only affected by timestamp. - assert!(insert(&mut cache, get_block(n as u64, 10)).is_ok()); - } - - #[test] - fn duplicate_timestamp() { - let mut blocks = get_blocks(7, 10); - - blocks[0].timestamp = 0; - blocks[1].timestamp = 10; - blocks[2].timestamp = 10; - blocks[3].timestamp = 20; - blocks[4].timestamp = 30; - blocks[5].timestamp = 40; - blocks[6].timestamp = 40; - - let mut cache = BlockCache::default(); - - for block in &blocks { - insert(&mut cache, block.clone()) - .expect("should add consecutive blocks with duplicate timestamps"); - } - - let blocks = blocks.into_iter().map(Arc::new).collect::>(); - - assert_eq!(cache.blocks, blocks, "should have added all blocks"); - } -} diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs deleted file mode 100644 index a2d4a1cf06..0000000000 --- a/beacon_node/eth1/src/deposit_cache.rs +++ /dev/null @@ -1,1090 +0,0 @@ -use crate::{DepositLog, Eth1Block}; -use ssz_derive::{Decode, Encode}; -use state_processing::common::DepositDataTree; -use std::cmp::Ordering; -use superstruct::superstruct; -use tree_hash::TreeHash; -use types::{Deposit, DepositTreeSnapshot, Hash256, DEPOSIT_TREE_DEPTH}; - -#[derive(Debug, PartialEq)] -pub enum Error { - /// A deposit log was added when a prior deposit was not already in the cache. - /// - /// Logs have to be added with monotonically-increasing block numbers. - NonConsecutive { log_index: u64, expected: usize }, - /// The eth1 event log data was unable to be parsed. 
- LogParse(String), - /// There are insufficient deposits in the cache to fulfil the request. - InsufficientDeposits { - known_deposits: usize, - requested: u64, - }, - /// A log with the given index is already present in the cache and it does not match the one - /// provided. - DuplicateDistinctLog(u64), - /// Attempted to insert log with given index after the log had been finalized - FinalizedLogInsert { - log_index: u64, - finalized_index: u64, - }, - /// The deposit count must always be large enough to account for the requested deposit range. - /// - /// E.g., you cannot request deposit 10 when the deposit count is 9. - DepositCountInvalid { deposit_count: u64, range_end: u64 }, - /// You can't request deposits on or before the finalized deposit - DepositRangeInvalid { - range_start: u64, - finalized_count: u64, - }, - /// You can't finalize what's already been finalized and the cache must have the logs - /// that you wish to finalize - InvalidFinalizeIndex { - requested_count: u64, - currently_finalized: u64, - deposit_count: u64, - }, - /// Error with the merkle tree for deposits. - DepositTree(merkle_proof::MerkleTreeError), - /// An unexpected condition was encountered. 
- Internal(String), - /// This is for errors that should never occur - PleaseNotifyTheDevs, -} - -pub type SszDepositCache = SszDepositCacheV13; - -#[superstruct( - variants(V13), - variant_attributes(derive(Encode, Decode, Clone)), - no_enum -)] -pub struct SszDepositCache { - pub logs: Vec, - pub leaves: Vec, - pub deposit_contract_deploy_block: u64, - pub finalized_deposit_count: u64, - pub finalized_block_height: u64, - pub deposit_tree_snapshot: Option, - pub deposit_roots: Vec, -} - -impl SszDepositCache { - pub fn from_deposit_cache(cache: &DepositCache) -> Self { - Self { - logs: cache.logs.clone(), - leaves: cache.leaves.clone(), - deposit_contract_deploy_block: cache.deposit_contract_deploy_block, - finalized_deposit_count: cache.finalized_deposit_count, - finalized_block_height: cache.finalized_block_height, - deposit_tree_snapshot: cache.deposit_tree.get_snapshot(), - deposit_roots: cache.deposit_roots.clone(), - } - } - - pub fn to_deposit_cache(&self) -> Result { - let deposit_tree = self - .deposit_tree_snapshot - .as_ref() - .map(|snapshot| { - let mut tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) - .map_err(|e| format!("Invalid SszDepositCache: {:?}", e))?; - for leaf in &self.leaves { - tree.push_leaf(*leaf).map_err(|e| { - format!("Invalid SszDepositCache: unable to push leaf: {:?}", e) - })?; - } - Ok::<_, String>(tree) - }) - .unwrap_or_else(|| { - // deposit_tree_snapshot = None (tree was never finalized) - // Create DepositDataTree from leaves - Ok(DepositDataTree::create( - &self.leaves, - self.leaves.len(), - DEPOSIT_TREE_DEPTH, - )) - })?; - - // Check for invalid SszDepositCache conditions - if self.leaves.len() != self.logs.len() { - return Err("Invalid SszDepositCache: logs and leaves should have equal length".into()); - } - // `deposit_roots` also includes the zero root - if self.leaves.len() + 1 != self.deposit_roots.len() { - return Err( - "Invalid SszDepositCache: deposit_roots length must be only one more than 
leaves" - .into(), - ); - } - Ok(DepositCache { - logs: self.logs.clone(), - leaves: self.leaves.clone(), - deposit_contract_deploy_block: self.deposit_contract_deploy_block, - finalized_deposit_count: self.finalized_deposit_count, - finalized_block_height: self.finalized_block_height, - deposit_tree, - deposit_roots: self.deposit_roots.clone(), - }) - } -} - -/// Mirrors the merkle tree of deposits in the eth1 deposit contract. -/// -/// Provides `Deposit` objects with merkle proofs included. -#[cfg_attr(test, derive(PartialEq))] -pub struct DepositCache { - logs: Vec, - leaves: Vec, - deposit_contract_deploy_block: u64, - finalized_deposit_count: u64, - finalized_block_height: u64, - /// An incremental merkle tree which represents the current state of the - /// deposit contract tree. - deposit_tree: DepositDataTree, - /// Vector of deposit roots. `deposit_roots[i]` denotes `deposit_root` at - /// `deposit_index` `i`. - deposit_roots: Vec, -} - -impl Default for DepositCache { - fn default() -> Self { - let deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); - let deposit_roots = vec![deposit_tree.root()]; - DepositCache { - logs: Vec::new(), - leaves: Vec::new(), - deposit_contract_deploy_block: 1, - finalized_deposit_count: 0, - finalized_block_height: 0, - deposit_tree, - deposit_roots, - } - } -} - -#[derive(Debug, PartialEq)] -pub enum DepositCacheInsertOutcome { - Inserted, - Duplicate, -} - -impl DepositCache { - /// Create new `DepositCache` given block number at which deposit - /// contract was deployed. 
- pub fn new(deposit_contract_deploy_block: u64) -> Self { - DepositCache { - deposit_contract_deploy_block, - finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), - ..Self::default() - } - } - - pub fn from_deposit_snapshot( - deposit_contract_deploy_block: u64, - snapshot: &DepositTreeSnapshot, - ) -> Result { - let deposit_tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) - .map_err(|e| format!("Invalid DepositSnapshot: {:?}", e))?; - Ok(DepositCache { - logs: Vec::new(), - leaves: Vec::new(), - deposit_contract_deploy_block, - finalized_deposit_count: snapshot.deposit_count, - finalized_block_height: snapshot.execution_block_height, - deposit_tree, - deposit_roots: vec![snapshot.deposit_root], - }) - } - - /// Returns the number of deposits the cache stores - pub fn len(&self) -> usize { - self.finalized_deposit_count as usize + self.logs.len() - } - - /// True if the cache does not store any blocks. - pub fn is_empty(&self) -> bool { - self.finalized_deposit_count != 0 && self.logs.is_empty() - } - - /// Returns the block number for the most recent deposit in the cache. - pub fn latest_block_number(&self) -> u64 { - self.logs - .last() - .map(|log| log.block_number) - .unwrap_or(self.finalized_block_height) - } - - /// Returns an iterator over all the logs in `self` that aren't finalized. - pub fn iter(&self) -> impl Iterator { - self.logs.iter() - } - - /// Returns the deposit log with INDEX i. 
- pub fn get_log(&self, i: usize) -> Option<&DepositLog> { - let finalized_deposit_count = self.finalized_deposit_count as usize; - if i < finalized_deposit_count { - None - } else { - self.logs.get(i - finalized_deposit_count) - } - } - - /// Returns the deposit root with DEPOSIT COUNT (not index) i - pub fn get_root(&self, i: usize) -> Option<&Hash256> { - let finalized_deposit_count = self.finalized_deposit_count as usize; - if i < finalized_deposit_count { - None - } else { - self.deposit_roots.get(i - finalized_deposit_count) - } - } - - /// Returns the finalized deposit count - pub fn finalized_deposit_count(&self) -> u64 { - self.finalized_deposit_count - } - - /// Finalizes the cache up to `eth1_block.deposit_count`. - pub fn finalize(&mut self, eth1_block: Eth1Block) -> Result<(), Error> { - let deposits_to_finalize = eth1_block.deposit_count.ok_or_else(|| { - Error::Internal("Eth1Block did not contain deposit_count".to_string()) - })?; - - let currently_finalized = self.finalized_deposit_count; - if deposits_to_finalize > self.len() as u64 || deposits_to_finalize <= currently_finalized { - Err(Error::InvalidFinalizeIndex { - requested_count: deposits_to_finalize, - currently_finalized, - deposit_count: self.len() as u64, - }) - } else { - let finalized_log = self - .get_log((deposits_to_finalize - 1) as usize) - .cloned() - .ok_or(Error::PleaseNotifyTheDevs)?; - let drop = (deposits_to_finalize - currently_finalized) as usize; - self.deposit_tree - .finalize(eth1_block.into()) - .map_err(Error::DepositTree)?; - self.logs.drain(0..drop); - self.leaves.drain(0..drop); - self.deposit_roots.drain(0..drop); - self.finalized_deposit_count = deposits_to_finalize; - self.finalized_block_height = finalized_log.block_number; - - Ok(()) - } - } - - /// Returns the deposit tree snapshot (if tree is finalized) - pub fn get_deposit_snapshot(&self) -> Option { - self.deposit_tree.get_snapshot() - } - - /// Adds `log` to self. 
- /// - /// This function enforces that `logs` are imported one-by-one with no gaps between - /// `log.index`, starting at `log.index == 0`. - /// - /// ## Errors - /// - /// - If a log with index `log.index - 1` is not already present in `self` (ignored when empty). - /// - If a log with `log.index` is already known, but the given `log` is distinct to it. - pub fn insert_log(&mut self, log: DepositLog) -> Result { - match log.index.cmp(&(self.len() as u64)) { - Ordering::Equal => { - let deposit = log.deposit_data.tree_hash_root(); - // should push to deposit_tree first because it's fallible - self.deposit_tree - .push_leaf(deposit) - .map_err(Error::DepositTree)?; - self.leaves.push(deposit); - self.logs.push(log); - self.deposit_roots.push(self.deposit_tree.root()); - Ok(DepositCacheInsertOutcome::Inserted) - } - Ordering::Less => { - let mut compare_index = log.index as usize; - if log.index < self.finalized_deposit_count { - return Err(Error::FinalizedLogInsert { - log_index: log.index, - finalized_index: self.finalized_deposit_count - 1, - }); - } else { - compare_index -= self.finalized_deposit_count as usize; - } - if self.logs[compare_index] == log { - Ok(DepositCacheInsertOutcome::Duplicate) - } else { - Err(Error::DuplicateDistinctLog(log.index)) - } - } - Ordering::Greater => Err(Error::NonConsecutive { - log_index: log.index, - expected: self.logs.len(), - }), - } - } - - /// Returns a list of `Deposit` objects, within the given deposit index `range`. - /// - /// The `deposit_count` is used to generate the proofs for the `Deposits`. For example, if we - /// have 100 proofs, but the eth2 chain only acknowledges 50 of them, we must produce our - /// proofs with respect to a tree size of 50. - /// - /// - /// ## Errors - /// - /// - If `deposit_count` is less than `end`. - /// - There are not sufficient deposits in the tree to generate the proof. 
- pub fn get_deposits( - &self, - start: u64, - end: u64, - deposit_count: u64, - ) -> Result<(Hash256, Vec), Error> { - if deposit_count < end { - // It's invalid to ask for more deposits than should exist. - Err(Error::DepositCountInvalid { - deposit_count, - range_end: end, - }) - } else if end > self.len() as u64 { - // The range of requested deposits exceeds the deposits stored locally. - Err(Error::InsufficientDeposits { - requested: end, - known_deposits: self.logs.len(), - }) - } else if self.finalized_deposit_count > start { - // Can't ask for deposits before or on the finalized deposit - Err(Error::DepositRangeInvalid { - range_start: start, - finalized_count: self.finalized_deposit_count, - }) - } else { - let (start, end, deposit_count) = ( - start - self.finalized_deposit_count, - end - self.finalized_deposit_count, - deposit_count - self.finalized_deposit_count, - ); - let leaves = self - .leaves - .get(0..deposit_count as usize) - .ok_or_else(|| Error::Internal("Unable to get known leaves".into()))?; - - let tree = self - .deposit_tree - .get_snapshot() - .map(|snapshot| { - // The tree has already been finalized. So we can just start from the snapshot - // and replay the deposits up to `deposit_count` - let mut tree = DepositDataTree::from_snapshot(&snapshot, DEPOSIT_TREE_DEPTH) - .map_err(Error::DepositTree)?; - for leaf in leaves { - tree.push_leaf(*leaf).map_err(Error::DepositTree)?; - } - Ok(tree) - }) - .unwrap_or_else(|| { - // Deposit tree hasn't been finalized yet, will have to re-create the whole tree - Ok(DepositDataTree::create( - leaves, - leaves.len(), - DEPOSIT_TREE_DEPTH, - )) - })?; - - let mut deposits = vec![]; - self.logs - .get(start as usize..end as usize) - .ok_or_else(|| Error::Internal("Unable to get known log".into()))? 
- .iter() - .try_for_each(|deposit_log| { - let (_leaf, proof) = tree - .generate_proof(deposit_log.index as usize) - .map_err(Error::DepositTree)?; - deposits.push(Deposit { - proof: proof.into(), - data: deposit_log.deposit_data.clone(), - }); - Ok(()) - })?; - - Ok((tree.root(), deposits)) - } - } - - /// Returns the number of deposits with valid signatures that have been observed up to and - /// including the block at `block_number`. - /// - /// Returns `None` if the `block_number` is zero or prior to contract deployment. - pub fn get_valid_signature_count(&self, block_number: u64) -> Option { - if block_number == 0 || block_number < self.deposit_contract_deploy_block { - None - } else { - Some( - self.logs - .iter() - .take_while(|deposit| deposit.block_number <= block_number) - .filter(|deposit| deposit.signature_is_valid) - .count(), - ) - } - } - - /// Returns the number of deposits that have been observed up to and - /// including the block at `block_number`. - /// - /// Returns `None` if the `block_number` is zero or prior to contract deployment - /// or prior to last finalized deposit. - pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option { - if block_number == 0 - || block_number < self.deposit_contract_deploy_block - || block_number < self.finalized_block_height - { - None - } else if block_number == self.finalized_block_height { - Some(self.finalized_deposit_count) - } else { - Some( - self.finalized_deposit_count - + self - .logs - .iter() - .take_while(|deposit| deposit.block_number <= block_number) - .count() as u64, - ) - } - } - - /// Gets the deposit root at block height = block_number. - /// - /// Fetches the `deposit_count` on or just before the queried `block_number` - /// and queries the `deposit_roots` map to get the corresponding `deposit_root`. 
- pub fn get_deposit_root_from_cache(&self, block_number: u64) -> Option { - let count = self.get_deposit_count_from_cache(block_number)?; - self.get_root(count as usize).cloned() - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use execution_layer::http::deposit_log::Log; - use types::{EthSpec, FixedBytesExtended, MainnetEthSpec}; - - /// The data from a deposit event, using the v0.8.3 version of the deposit contract. - pub const EXAMPLE_LOG: &[u8] = &[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82, - 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239, - 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, - 30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, - 119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 
44, 51, 77, 40, 10, 72, 246, 34, 193, - 187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, - 149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, - 18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]; - - fn example_log() -> DepositLog { - let spec = MainnetEthSpec::default_spec(); - - let log = Log { - block_number: 42, - data: EXAMPLE_LOG.to_vec(), - }; - log.to_deposit_log(&spec).expect("should decode log") - } - - fn get_cache_with_deposits(n: u64) -> DepositCache { - let mut deposit_cache = DepositCache::default(); - for i in 0..n { - let mut log = example_log(); - log.index = i; - log.block_number = i; - log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); - deposit_cache - .insert_log(log) - .expect("should add consecutive logs"); - } - assert_eq!(deposit_cache.len() as u64, n, "should have {} deposits", n); - - deposit_cache - } - - #[test] - fn insert_log_valid() { - let mut deposit_cache = DepositCache::default(); - - for i in 0..16 { - let mut log = example_log(); - log.index = i; - deposit_cache - .insert_log(log) - .expect("should add consecutive logs"); - } - } - - #[test] - fn insert_log_invalid() { - let mut deposit_cache = DepositCache::default(); - - for i in 0..4 { - let mut log = example_log(); - log.index = i; - deposit_cache - .insert_log(log) - .expect("should add consecutive logs"); - } - - // Add duplicate, when given is the same as the one known. - let mut log = example_log(); - log.index = 3; - assert_eq!( - deposit_cache.insert_log(log).unwrap(), - DepositCacheInsertOutcome::Duplicate - ); - - // Add duplicate, when given is different to the one known. 
- let mut log = example_log(); - log.index = 3; - log.block_number = 99; - assert!(deposit_cache.insert_log(log).is_err()); - - // Skip inserting a log. - let mut log = example_log(); - log.index = 5; - assert!(deposit_cache.insert_log(log).is_err()); - } - - #[test] - fn get_deposit_valid() { - let n = 1_024; - let deposit_cache = get_cache_with_deposits(n); - - // Get 0 deposits, with max deposit count. - let (_, deposits) = deposit_cache - .get_deposits(0, 0, n) - .expect("should get the full tree"); - assert_eq!(deposits.len(), 0, "should return no deposits"); - - // Get 0 deposits, with 0 deposit count. - let (_, deposits) = deposit_cache - .get_deposits(0, 0, 0) - .expect("should get the full tree"); - assert_eq!(deposits.len(), 0, "should return no deposits"); - - // Get all deposits, with max deposit count. - let (full_root, deposits) = deposit_cache - .get_deposits(0, n, n) - .expect("should get the full tree"); - assert_eq!(deposits.len(), n as usize, "should return all deposits"); - - // Get 4 deposits, with max deposit count. - let (root, deposits) = deposit_cache - .get_deposits(0, 4, n) - .expect("should get the four from the full tree"); - assert_eq!( - deposits.len(), - 4_usize, - "should get 4 deposits from full tree" - ); - assert_eq!( - root, full_root, - "should still return full root when getting deposit subset" - ); - - // Get half of the deposits, with half deposit count. - let half = n / 2; - let (half_root, deposits) = deposit_cache - .get_deposits(0, half, half) - .expect("should get the half tree"); - assert_eq!(deposits.len(), half as usize, "should return half deposits"); - - // Get 4 deposits, with half deposit count. 
- let (root, deposits) = deposit_cache - .get_deposits(0, 4, n / 2) - .expect("should get the half tree"); - assert_eq!( - deposits.len(), - 4_usize, - "should get 4 deposits from half tree" - ); - assert_eq!( - root, half_root, - "should still return half root when getting deposit subset" - ); - assert_ne!( - full_root, half_root, - "should get different root when pinning deposit count" - ); - } - - #[test] - fn get_deposit_invalid() { - let n = 16; - let mut tree = get_cache_with_deposits(n); - - // Range too high. - assert!(tree.get_deposits(0, n + 1, n).is_err()); - - // Count too high. - assert!(tree.get_deposits(0, n, n + 1).is_err()); - - // Range higher than count. - assert!(tree.get_deposits(0, 4, 2).is_err()); - - let block7 = fake_eth1_block(&tree, 7).expect("should create fake eth1 block"); - tree.finalize(block7).expect("should finalize"); - // Range starts <= finalized deposit - assert!(tree.get_deposits(6, 9, 11).is_err()); - assert!(tree.get_deposits(7, 9, 11).is_err()); - // Range start > finalized deposit should be OK - assert!(tree.get_deposits(8, 9, 11).is_ok()); - } - - // returns an eth1 block that can be used to finalize the cache at `deposit_index` - // this will ensure the `deposit_root` on the `Eth1Block` is correct - fn fake_eth1_block(deposit_cache: &DepositCache, deposit_index: usize) -> Option { - let deposit_log = deposit_cache.get_log(deposit_index)?; - Some(Eth1Block { - hash: Hash256::from_low_u64_be(deposit_log.block_number), - timestamp: 0, - number: deposit_log.block_number, - deposit_root: deposit_cache.get_root(deposit_index + 1).cloned(), - deposit_count: Some(deposit_log.index + 1), - }) - } - - #[test] - fn test_finalization_boundaries() { - let n = 8; - let half = n / 2; - - let mut deposit_cache = get_cache_with_deposits(n as u64); - - let full_root_before_finalization = deposit_cache.deposit_tree.root(); - let half_log_plus1_before_finalization = deposit_cache - .get_log(half + 1) - .expect("log should exist") - 
.clone(); - let half_root_plus1_before_finalization = - *deposit_cache.get_root(half + 1).expect("root should exist"); - - let (root_before_finalization, proof_before_finalization) = deposit_cache - .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) - .expect("should return 1 deposit with proof"); - - // finalize on the tree at half - let half_block = - fake_eth1_block(&deposit_cache, half).expect("fake block should be created"); - assert!( - deposit_cache.get_deposit_snapshot().is_none(), - "snapshot should not exist as tree has not been finalized" - ); - deposit_cache - .finalize(half_block) - .expect("tree should_finalize"); - - // check boundary conditions for get_log - assert!( - deposit_cache.get_log(half).is_none(), - "log at finalized deposit should NOT exist" - ); - assert_eq!( - *deposit_cache.get_log(half + 1).expect("log should exist"), - half_log_plus1_before_finalization, - "log after finalized deposit should match before finalization" - ); - // check boundary conditions for get_root - assert!( - deposit_cache.get_root(half).is_none(), - "root at finalized deposit should NOT exist" - ); - assert_eq!( - *deposit_cache.get_root(half + 1).expect("root should exist"), - half_root_plus1_before_finalization, - "root after finalized deposit should match before finalization" - ); - // full root should match before and after finalization - assert_eq!( - deposit_cache.deposit_tree.root(), - full_root_before_finalization, - "full root should match before and after finalization" - ); - // check boundary conditions for get_deposits (proof) - assert!( - deposit_cache - .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) - .is_err(), - "cannot prove the finalized deposit" - ); - let (root_after_finalization, proof_after_finalization) = deposit_cache - .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) - .expect("should return 1 deposit with proof"); - assert_eq!( - root_before_finalization, 
root_after_finalization, - "roots before and after finalization should match" - ); - assert_eq!( - proof_before_finalization, proof_after_finalization, - "proof before and after finalization should match" - ); - - // recover tree from snapshot by replaying deposits - let snapshot = deposit_cache - .get_deposit_snapshot() - .expect("snapshot should exist"); - let mut recovered = DepositCache::from_deposit_snapshot(1, &snapshot) - .expect("should recover finalized tree"); - for i in half + 1..n { - let mut log = example_log(); - log.index = i as u64; - log.block_number = i as u64; - log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i as u64); - recovered - .insert_log(log) - .expect("should add consecutive logs"); - } - - // check the same boundary conditions above for the recovered tree - assert!( - recovered.get_log(half).is_none(), - "log at finalized deposit should NOT exist" - ); - assert_eq!( - *recovered.get_log(half + 1).expect("log should exist"), - half_log_plus1_before_finalization, - "log after finalized deposit should match before finalization in recovered tree" - ); - // check boundary conditions for get_root - assert!( - recovered.get_root(half).is_none(), - "root at finalized deposit should NOT exist" - ); - assert_eq!( - *recovered.get_root(half + 1).expect("root should exist"), - half_root_plus1_before_finalization, - "root after finalized deposit should match before finalization in recovered tree" - ); - // full root should match before and after finalization - assert_eq!( - recovered.deposit_tree.root(), - full_root_before_finalization, - "full root should match before and after finalization" - ); - // check boundary conditions for get_deposits (proof) - assert!( - recovered - .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) - .is_err(), - "cannot prove the finalized deposit" - ); - let (recovered_root_after_finalization, recovered_proof_after_finalization) = recovered - .get_deposits((half + 1) as u64, (half + 2) 
as u64, (half + 2) as u64) - .expect("should return 1 deposit with proof"); - assert_eq!( - root_before_finalization, recovered_root_after_finalization, - "recovered roots before and after finalization should match" - ); - assert_eq!( - proof_before_finalization, recovered_proof_after_finalization, - "recovered proof before and after finalization should match" - ); - } - - #[test] - fn test_finalization() { - let n = 1024; - let half = n / 2; - let quarter = half / 2; - let mut deposit_cache = get_cache_with_deposits(n); - - let full_root_before_finalization = deposit_cache.deposit_tree.root(); - let q3_root_before_finalization = deposit_cache - .get_root((half + quarter) as usize) - .cloned() - .expect("root should exist"); - let q3_log_before_finalization = deposit_cache - .get_log((half + quarter) as usize) - .cloned() - .expect("log should exist"); - // get_log(half+quarter) should return log with index `half+quarter` - assert_eq!( - q3_log_before_finalization.index, - half + quarter, - "log index should be {}", - half + quarter, - ); - - // get lower quarter of deposits with max deposit count - let (lower_quarter_root_before_finalization, lower_quarter_deposits_before_finalization) = - deposit_cache - .get_deposits(quarter, half, n) - .expect("should get lower quarter"); - assert_eq!( - lower_quarter_deposits_before_finalization.len(), - quarter as usize, - "should get {} deposits from lower quarter", - quarter, - ); - // since the lower quarter was done with full deposits, root should be the same as full_root_before_finalization - assert_eq!( - lower_quarter_root_before_finalization, full_root_before_finalization, - "should still get full root with deposit subset", - ); - - // get upper quarter of deposits with slightly reduced deposit count - let (upper_quarter_root_before_finalization, upper_quarter_deposits_before_finalization) = - deposit_cache - .get_deposits(half, half + quarter, n - 2) - .expect("should get upper quarter"); - assert_eq!( - 
upper_quarter_deposits_before_finalization.len(), - quarter as usize, - "should get {} deposits from upper quarter", - quarter, - ); - // since upper quarter was with subset of nodes, it should differ from full root - assert_ne!( - full_root_before_finalization, upper_quarter_root_before_finalization, - "subtree root should differ from full root", - ); - - let f0_log = deposit_cache - .get_log((quarter - 1) as usize) - .cloned() - .expect("should return log"); - let f0_block = fake_eth1_block(&deposit_cache, (quarter - 1) as usize) - .expect("fake eth1 block should be created"); - - // finalize first quarter - deposit_cache - .finalize(f0_block) - .expect("should finalize first quarter"); - // finalized count and block number should match log - assert_eq!( - deposit_cache.finalized_deposit_count, - f0_log.index + 1, - "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", - ); - assert_eq!( - deposit_cache.finalized_block_height, - f0_log.block_number, - "after calling finalize(eth1block) finalized_block_number should equal eth1block.block_number" - ); - // check get_log boundaries - assert!( - deposit_cache.get_log((quarter - 1) as usize).is_none(), - "get_log() should return None for index <= finalized log index", - ); - assert!( - deposit_cache.get_log(quarter as usize).is_some(), - "get_log() should return Some(log) for index >= finalized_deposit_count", - ); - - // full root should remain the same after finalization - assert_eq!( - full_root_before_finalization, - deposit_cache.deposit_tree.root(), - "root should be the same before and after finalization", - ); - // get_root should return the same root before and after finalization - assert_eq!( - q3_root_before_finalization, - deposit_cache - .get_root((half + quarter) as usize) - .cloned() - .expect("root should exist"), - "get_root should return the same root before and after finalization", - ); - // get_log should return the same log before and after finalization 
- assert_eq!( - q3_log_before_finalization, - deposit_cache - .get_log((half + quarter) as usize) - .cloned() - .expect("log should exist"), - "get_log should return the same log before and after finalization", - ); - - // again get lower quarter of deposits with max deposit count after finalization - let (f0_lower_quarter_root, f0_lower_quarter_deposits) = deposit_cache - .get_deposits(quarter, half, n) - .expect("should get lower quarter"); - assert_eq!( - f0_lower_quarter_deposits.len(), - quarter as usize, - "should get {} deposits from lower quarter", - quarter, - ); - // again get upper quarter of deposits with slightly reduced deposit count after finalization - let (f0_upper_quarter_root, f0_upper_quarter_deposits) = deposit_cache - .get_deposits(half, half + quarter, n - 2) - .expect("should get upper quarter"); - assert_eq!( - f0_upper_quarter_deposits.len(), - quarter as usize, - "should get {} deposits from upper quarter", - quarter, - ); - - // lower quarter root and deposits should be the same - assert_eq!( - lower_quarter_root_before_finalization, f0_lower_quarter_root, - "root should be the same before and after finalization", - ); - for i in 0..lower_quarter_deposits_before_finalization.len() { - assert_eq!( - lower_quarter_deposits_before_finalization[i], f0_lower_quarter_deposits[i], - "get_deposits() should be the same before and after finalization", - ); - } - // upper quarter root and deposits should be the same - assert_eq!( - upper_quarter_root_before_finalization, f0_upper_quarter_root, - "subtree root should be the same before and after finalization", - ); - for i in 0..upper_quarter_deposits_before_finalization.len() { - assert_eq!( - upper_quarter_deposits_before_finalization[i], f0_upper_quarter_deposits[i], - "get_deposits() should be the same before and after finalization", - ); - } - - let f1_log = deposit_cache - .get_log((half - 2) as usize) - .cloned() - .expect("should return log"); - // finalize a little less than half to test 
multiple finalization - let f1_block = fake_eth1_block(&deposit_cache, (half - 2) as usize) - .expect("should create fake eth1 block"); - deposit_cache - .finalize(f1_block) - .expect("should finalize a little less than half"); - // finalized count and block number should match f1_log - assert_eq!( - deposit_cache.finalized_deposit_count, - f1_log.index + 1, - "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", - ); - assert_eq!( - deposit_cache.finalized_block_height, - f1_log.block_number, - "after calling finalize(eth1block) finalized_block_number should equal eth1block.block_number" - ); - // check get_log boundaries - assert!( - deposit_cache.get_log((half - 2) as usize).is_none(), - "get_log() should return None for index <= finalized log index", - ); - assert!( - deposit_cache.get_log((half - 1) as usize).is_some(), - "get_log() should return Some(log) for index >= finalized_deposit_count", - ); - - // full root should still be unchanged - assert_eq!( - full_root_before_finalization, - deposit_cache.deposit_tree.root(), - "root should be the same before and after finalization", - ); - - // again get upper quarter of deposits with slightly reduced deposit count after second finalization - let (f1_upper_quarter_root, f1_upper_quarter_deposits) = deposit_cache - .get_deposits(half, half + quarter, n - 2) - .expect("should get upper quarter"); - - // upper quarter root and deposits should be the same after second finalization - assert_eq!( - f0_upper_quarter_root, f1_upper_quarter_root, - "subtree root should be the same after multiple finalization", - ); - for i in 0..f0_upper_quarter_deposits.len() { - assert_eq!( - f0_upper_quarter_deposits[i], f1_upper_quarter_deposits[i], - "get_deposits() should be the same before and after finalization", - ); - } - } - - fn verify_equality(original: &DepositCache, copy: &DepositCache) { - // verify each field individually so that if one field should - // fail to recover, this 
test will point right to it - assert_eq!(original.deposit_contract_deploy_block, copy.deposit_contract_deploy_block, "DepositCache: deposit_contract_deploy_block should remain the same after encoding and decoding from ssz" ); - assert_eq!( - original.leaves, copy.leaves, - "DepositCache: leaves should remain the same after encoding and decoding from ssz" - ); - assert_eq!( - original.logs, copy.logs, - "DepositCache: logs should remain the same after encoding and decoding from ssz" - ); - assert_eq!(original.finalized_deposit_count, copy.finalized_deposit_count, "DepositCache: finalized_deposit_count should remain the same after encoding and decoding from ssz"); - assert_eq!(original.finalized_block_height, copy.finalized_block_height, "DepositCache: finalized_block_height should remain the same after encoding and decoding from ssz"); - assert_eq!(original.deposit_roots, copy.deposit_roots, "DepositCache: deposit_roots should remain the same before and after encoding and decoding from ssz"); - assert!(original.deposit_tree == copy.deposit_tree, "DepositCache: deposit_tree should remain the same before and after encoding and decoding from ssz"); - // verify all together for good measure - assert!( - original == copy, - "Deposit cache should remain the same after encoding and decoding from ssz" - ); - } - - fn ssz_round_trip(original: &DepositCache) -> DepositCache { - use ssz::{Decode, Encode}; - let bytes = SszDepositCache::from_deposit_cache(original).as_ssz_bytes(); - let ssz_cache = - SszDepositCache::from_ssz_bytes(&bytes).expect("should decode from ssz bytes"); - - SszDepositCache::to_deposit_cache(&ssz_cache).expect("should recover cache") - } - - #[test] - fn ssz_encode_decode() { - let deposit_cache = get_cache_with_deposits(512); - let recovered_cache = ssz_round_trip(&deposit_cache); - - verify_equality(&deposit_cache, &recovered_cache); - } - - #[test] - fn ssz_encode_decode_with_finalization() { - let mut deposit_cache = get_cache_with_deposits(512); - 
let block383 = fake_eth1_block(&deposit_cache, 383).expect("should create fake eth1 block"); - deposit_cache.finalize(block383).expect("should finalize"); - let mut first_recovery = ssz_round_trip(&deposit_cache); - - verify_equality(&deposit_cache, &first_recovery); - // finalize again to verify equality after multiple finalizations - let block447 = fake_eth1_block(&deposit_cache, 447).expect("should create fake eth1 block"); - first_recovery.finalize(block447).expect("should finalize"); - - let mut second_recovery = ssz_round_trip(&first_recovery); - verify_equality(&first_recovery, &second_recovery); - - // verify equality of a tree that finalized block383, block447, block479 - // with a tree that finalized block383, block479 - let block479 = fake_eth1_block(&deposit_cache, 479).expect("should create fake eth1 block"); - second_recovery - .finalize(block479.clone()) - .expect("should finalize"); - let third_recovery = ssz_round_trip(&second_recovery); - deposit_cache.finalize(block479).expect("should finalize"); - - verify_equality(&deposit_cache, &third_recovery); - } -} diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs deleted file mode 100644 index 1f45346256..0000000000 --- a/beacon_node/eth1/src/inner.rs +++ /dev/null @@ -1,130 +0,0 @@ -use crate::service::endpoint_from_config; -use crate::Config; -use crate::{ - block_cache::{BlockCache, Eth1Block}, - deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV13}, -}; -use execution_layer::HttpJsonRpc; -use parking_lot::RwLock; -use ssz::four_byte_option_impl; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::sync::Arc; -use superstruct::superstruct; -use types::{ChainSpec, DepositTreeSnapshot, Eth1Data}; - -// Define "legacy" implementations of `Option` which use four bytes for encoding the union -// selector. 
-four_byte_option_impl!(four_byte_option_u64, u64); - -#[derive(Default)] -pub struct DepositUpdater { - pub cache: DepositCache, - pub last_processed_block: Option, -} - -impl DepositUpdater { - pub fn new(deposit_contract_deploy_block: u64) -> Self { - let cache = DepositCache::new(deposit_contract_deploy_block); - DepositUpdater { - cache, - last_processed_block: None, - } - } - - pub fn from_snapshot( - deposit_contract_deploy_block: u64, - snapshot: &DepositTreeSnapshot, - ) -> Result { - let last_processed_block = Some(snapshot.execution_block_height); - Ok(Self { - cache: DepositCache::from_deposit_snapshot(deposit_contract_deploy_block, snapshot)?, - last_processed_block, - }) - } -} - -pub struct Inner { - pub block_cache: RwLock, - pub deposit_cache: RwLock, - pub endpoint: HttpJsonRpc, - // this gets set to Some(Eth1Data) when the deposit finalization conditions are met - pub to_finalize: RwLock>, - pub config: RwLock, - pub remote_head_block: RwLock>, - pub spec: Arc, -} - -impl Inner { - /// Prunes the block cache to `self.target_block_cache_len`. - /// - /// Is a no-op if `self.target_block_cache_len` is `None`. - pub fn prune_blocks(&self) { - if let Some(block_cache_truncation) = self.config.read().block_cache_truncation { - self.block_cache.write().truncate(block_cache_truncation); - } - } - - /// Encode the eth1 block and deposit cache as bytes. - pub fn as_bytes(&self) -> Vec { - let ssz_eth1_cache = SszEth1Cache::from_inner(self); - ssz_eth1_cache.as_ssz_bytes() - } - - /// Recover `Inner` given byte representation of eth1 deposit and block caches. - pub fn from_bytes(bytes: &[u8], config: Config, spec: Arc) -> Result { - SszEth1Cache::from_ssz_bytes(bytes) - .map_err(|e| format!("Ssz decoding error: {:?}", e))? - .to_inner(config, spec) - .inspect(|inner| inner.block_cache.write().rebuild_by_hash_map()) - } - - /// Returns a reference to the specification. 
- pub fn spec(&self) -> &ChainSpec { - &self.spec - } -} - -pub type SszEth1Cache = SszEth1CacheV13; - -#[superstruct( - variants(V13), - variant_attributes(derive(Encode, Decode, Clone)), - no_enum -)] -pub struct SszEth1Cache { - pub block_cache: BlockCache, - pub deposit_cache: SszDepositCacheV13, - #[ssz(with = "four_byte_option_u64")] - pub last_processed_block: Option, -} - -impl SszEth1Cache { - pub fn from_inner(inner: &Inner) -> Self { - let deposit_updater = inner.deposit_cache.read(); - let block_cache = inner.block_cache.read(); - Self { - block_cache: (*block_cache).clone(), - deposit_cache: SszDepositCache::from_deposit_cache(&deposit_updater.cache), - last_processed_block: deposit_updater.last_processed_block, - } - } - - pub fn to_inner(&self, config: Config, spec: Arc) -> Result { - Ok(Inner { - block_cache: RwLock::new(self.block_cache.clone()), - deposit_cache: RwLock::new(DepositUpdater { - cache: self.deposit_cache.to_deposit_cache()?, - last_processed_block: self.last_processed_block, - }), - endpoint: endpoint_from_config(&config) - .map_err(|e| format!("Failed to create endpoint: {:?}", e))?, - to_finalize: RwLock::new(None), - // Set the remote head_block zero when creating a new instance. We only care about - // present and future eth1 nodes. 
- remote_head_block: RwLock::new(None), - config: RwLock::new(config), - spec, - }) - } -} diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs deleted file mode 100644 index 9c4f9a1d8d..0000000000 --- a/beacon_node/eth1/src/lib.rs +++ /dev/null @@ -1,14 +0,0 @@ -mod block_cache; -mod deposit_cache; -mod inner; -mod metrics; -mod service; - -pub use block_cache::{BlockCache, Eth1Block}; -pub use deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV13}; -pub use execution_layer::http::deposit_log::DepositLog; -pub use inner::{SszEth1Cache, SszEth1CacheV13}; -pub use service::{ - BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service, - DEFAULT_CHAIN_ID, -}; diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs deleted file mode 100644 index 1df4ba0df9..0000000000 --- a/beacon_node/eth1/src/metrics.rs +++ /dev/null @@ -1,41 +0,0 @@ -pub use metrics::*; -use std::sync::LazyLock; - -/* - * Eth1 blocks - */ -pub static BLOCK_CACHE_LEN: LazyLock> = - LazyLock::new(|| try_create_int_gauge("eth1_block_cache_len", "Count of eth1 blocks in cache")); -pub static LATEST_CACHED_BLOCK_TIMESTAMP: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "eth1_latest_cached_block_timestamp", - "Timestamp of latest block in eth1 cache", - ) -}); - -/* - * Eth1 deposits - */ -pub static DEPOSIT_CACHE_LEN: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "eth1_deposit_cache_len", - "Number of deposits in the eth1 cache", - ) -}); -pub static HIGHEST_PROCESSED_DEPOSIT_BLOCK: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "eth1_highest_processed_deposit_block", - "Number of the last block checked for deposits", - ) -}); - -/* - * Eth1 rpc connection - */ - -pub static ETH1_CONNECTED: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "sync_eth1_connected", - "Set to 1 if connected to an eth1 node, otherwise set to 0", - ) -}); diff --git a/beacon_node/eth1/src/service.rs 
b/beacon_node/eth1/src/service.rs deleted file mode 100644 index 6b10bd2215..0000000000 --- a/beacon_node/eth1/src/service.rs +++ /dev/null @@ -1,1243 +0,0 @@ -use crate::metrics; -use crate::{ - block_cache::{BlockCache, Error as BlockCacheError, Eth1Block}, - deposit_cache::{DepositCacheInsertOutcome, Error as DepositCacheError}, - inner::{DepositUpdater, Inner}, -}; -use execution_layer::auth::Auth; -use execution_layer::http::{ - deposit_methods::{BlockQuery, Eth1Id}, - HttpJsonRpc, -}; -use futures::future::TryFutureExt; -use parking_lot::{RwLock, RwLockReadGuard}; -use sensitive_url::SensitiveUrl; -use serde::{Deserialize, Serialize}; -use std::fmt::Debug; -use std::ops::{Range, RangeInclusive}; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; -use tokio::time::{interval_at, Duration, Instant}; -use tracing::{debug, error, info, trace, warn}; -use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned}; - -/// Indicates the default eth1 chain id we use for the deposit contract. -pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Mainnet; -/// Indicates the default eth1 endpoint. -pub const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545"; - -const STANDARD_TIMEOUT_MILLIS: u64 = 15_000; - -/// Timeout when doing a eth_blockNumber call. -const BLOCK_NUMBER_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; -/// Timeout when doing an eth_getBlockByNumber call. -const GET_BLOCK_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; -/// Timeout when doing an eth_getLogs to read the deposit contract logs. -const GET_DEPOSIT_LOG_TIMEOUT_MILLIS: u64 = 60_000; - -/// Number of blocks to download if the node detects it is lagging behind due to an inaccurate -/// relationship between block-number-based follow distance and time-based follow distance. -const CATCHUP_BATCH_SIZE: u64 = 128; - -/// The absolute minimum follow distance to enforce when downloading catchup batches. 
-const CATCHUP_MIN_FOLLOW_DISTANCE: u64 = 64; - -/// To account for fast PoW blocks requiring more blocks in the cache than the block-based follow -/// distance would imply, we store `CACHE_FACTOR` more blocks in our cache. -const CACHE_FACTOR: u64 = 2; - -#[derive(Debug, PartialEq, Clone)] -pub enum EndpointError { - RequestFailed(String), - WrongChainId, - FarBehind, -} - -type EndpointState = Result<(), EndpointError>; - -/// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and -/// chain id. Otherwise it returns `Err`. -async fn endpoint_state(endpoint: &HttpJsonRpc, config_chain_id: &Eth1Id) -> EndpointState { - let error_connecting = |e: String| { - debug!( - %endpoint, - error = &e, - "eth1 endpoint error" - ); - warn!( - %endpoint, - "Error connecting to eth1 node endpoint" - ); - EndpointError::RequestFailed(e) - }; - - let chain_id = endpoint - .get_chain_id(Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) - .await - .map_err(error_connecting)?; - // Eth1 nodes return chain_id = 0 if the node is not synced - // Handle the special case - if chain_id == Eth1Id::Custom(0) { - warn!( - %endpoint, - "Remote execution node is not synced" - ); - return Err(EndpointError::FarBehind); - } - if &chain_id != config_chain_id { - warn!( - %endpoint, - expected = ?config_chain_id, - received = ?chain_id, - "Invalid execution chain ID. Please switch to correct chain ID on endpoint" - ); - Err(EndpointError::WrongChainId) - } else { - Ok(()) - } -} - -/// Enum for the two internal (maybe different) cached heads for cached deposits and for the block -/// cache. -pub enum HeadType { - Deposit, - BlockCache, -} - -/// Returns the head block and the new block ranges relevant for deposits and the block cache -/// from the given endpoint. 
-async fn get_remote_head_and_new_block_ranges( - endpoint: &HttpJsonRpc, - service: &Service, - node_far_behind_seconds: u64, -) -> Result< - ( - Eth1Block, - Option>, - Option>, - ), - Error, -> { - let remote_head_block = download_eth1_block(endpoint, service.inner.clone(), None).await?; - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map(|d| d.as_secs()) - .unwrap_or(u64::MAX); - if remote_head_block.timestamp + node_far_behind_seconds < now { - warn!( - %endpoint, - last_seen_block_unix_timestamp = remote_head_block.timestamp, - "Execution endpoint is not synced" - ); - return Err(Error::EndpointError(EndpointError::FarBehind)); - } - - let handle_remote_not_synced = |e| { - if let Error::RemoteNotSynced { .. } = e { - warn!( - %endpoint, - "Execution endpoint is not synced" - ); - } - e - }; - let new_deposit_block_numbers = service - .relevant_new_block_numbers( - remote_head_block.number, - Some(remote_head_block.timestamp), - HeadType::Deposit, - ) - .map_err(handle_remote_not_synced)?; - let new_block_cache_numbers = service - .relevant_new_block_numbers( - remote_head_block.number, - Some(remote_head_block.timestamp), - HeadType::BlockCache, - ) - .map_err(handle_remote_not_synced)?; - Ok(( - remote_head_block, - new_deposit_block_numbers, - new_block_cache_numbers, - )) -} - -/// Returns the range of new block numbers to be considered for the given head type from the given -/// endpoint. -async fn relevant_new_block_numbers_from_endpoint( - endpoint: &HttpJsonRpc, - service: &Service, - head_type: HeadType, -) -> Result>, Error> { - let remote_highest_block = endpoint - .get_block_number(Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) - .map_err(Error::GetBlockNumberFailed) - .await?; - service.relevant_new_block_numbers(remote_highest_block, None, head_type) -} - -#[derive(Debug, PartialEq)] -pub enum Error { - /// There was an inconsistency when adding a block to the cache. 
- FailedToInsertEth1Block(BlockCacheError), - /// There was an inconsistency when adding a deposit to the cache. - FailedToInsertDeposit(DepositCacheError), - /// A log downloaded from the eth1 contract was not well formed. - FailedToParseDepositLog { - block_range: Range, - error: String, - }, - /// Endpoint is currently not functional. - EndpointError(EndpointError), - /// The remote node is less synced that we expect, it is not useful until has done more - /// syncing. - RemoteNotSynced { - next_required_block: u64, - remote_highest_block: u64, - cache_follow_distance: u64, - }, - /// Failed to download a block from the eth1 node. - BlockDownloadFailed(String), - /// Failed to get the current block number from the eth1 node. - GetBlockNumberFailed(String), - /// Failed to read the deposit contract root from the eth1 node. - GetDepositRootFailed(String), - /// Failed to read the deposit contract deposit count from the eth1 node. - GetDepositCountFailed(String), - /// Failed to read the deposit contract root from the eth1 node. - GetDepositLogsFailed(String), - /// There was an unexpected internal error. - Internal(String), - /// Error finalizing deposit - FailedToFinalizeDeposit(String), - /// There was a problem Initializing from deposit snapshot - FailedToInitializeFromSnapshot(String), -} - -/// The success message for an Eth1Data cache update. -#[derive(Debug, PartialEq, Clone)] -pub struct BlockCacheUpdateOutcome { - pub blocks_imported: usize, - pub head_block_number: Option, -} - -/// The success message for an Eth1 deposit cache update. -#[derive(Debug, PartialEq, Clone)] -pub struct DepositCacheUpdateOutcome { - pub logs_imported: usize, -} - -/// Supports either one authenticated jwt JSON-RPC endpoint **or** -/// multiple non-authenticated endpoints with fallback. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub enum Eth1Endpoint { - Auth { - endpoint: SensitiveUrl, - jwt_path: PathBuf, - jwt_id: Option, - jwt_version: Option, - }, - NoAuth(SensitiveUrl), -} - -impl Eth1Endpoint { - pub fn get_endpoint(&self) -> SensitiveUrl { - match &self { - Self::Auth { endpoint, .. } => endpoint.clone(), - Self::NoAuth(endpoint) => endpoint.clone(), - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - /// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint. - pub endpoint: Eth1Endpoint, - /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. - pub deposit_contract_address: String, - /// The eth1 chain id where the deposit contract is deployed (Holesky/Mainnet). - pub chain_id: Eth1Id, - /// Defines the first block that the `DepositCache` will start searching for deposit logs. - /// - /// Setting too high can result in missed logs. Setting too low will result in unnecessary - /// calls to the Eth1 node's HTTP JSON RPC. - pub deposit_contract_deploy_block: u64, - /// Defines the lowest block number that should be downloaded and added to the `BlockCache`. - pub lowest_cached_block_number: u64, - /// Defines how far behind the Eth1 node's head we should follow. - /// - /// Note: this should be less than or equal to the specification's `ETH1_FOLLOW_DISTANCE`. - pub follow_distance: u64, - /// The follow distance to use for blocks in our cache. - /// - /// This can be set lower than the true follow distance in order to correct for poor timing - /// of eth1 blocks. - pub cache_follow_distance: Option, - /// Specifies the seconds when we consider the head of a node far behind. - /// This should be less than `ETH1_FOLLOW_DISTANCE * SECONDS_PER_ETH1_BLOCK`. - pub node_far_behind_seconds: u64, - /// Defines the number of blocks that should be retained each time the `BlockCache` calls truncate on - /// itself. 
- pub block_cache_truncation: Option, - /// The interval between updates when using the `auto_update` function. - pub auto_update_interval_millis: u64, - /// The span of blocks we should query for logs, per request. - pub blocks_per_log_query: usize, - /// The maximum number of log requests per update. - pub max_log_requests_per_update: Option, - /// The maximum number of log requests per update. - pub max_blocks_per_update: Option, - /// If set to true, the eth1 caches are wiped clean when the eth1 service starts. - pub purge_cache: bool, - pub execution_timeout_multiplier: u32, -} - -impl Config { - /// Sets the block cache to a length that is suitable for the given `EthSpec` and `ChainSpec`. - pub fn set_block_cache_truncation(&mut self, spec: &ChainSpec) { - // Compute the number of eth1 blocks in an eth1 voting period. - let seconds_per_voting_period = - E::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot; - let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block; - - // Ensure we can store two full windows of voting blocks. - let voting_windows = eth1_blocks_per_voting_period * 2; - - // Extend the cache to account for the cache follow distance. - let extra_follow_distance_blocks = self - .follow_distance - .saturating_sub(self.cache_follow_distance()); - - let length = voting_windows + extra_follow_distance_blocks; - - // Allow for more blocks to account for blocks being generated faster than expected. - // The cache expiry should really be timestamp based, but that would require a more - // extensive refactor. - let cache_size = CACHE_FACTOR * length; - - self.block_cache_truncation = Some(cache_size as usize); - } - - /// The distance at which the cache should follow the head. - /// - /// Defaults to 3/4 of `follow_distance` unless set manually. 
- pub fn cache_follow_distance(&self) -> u64 { - self.cache_follow_distance - .unwrap_or(3 * self.follow_distance / 4) - } -} - -impl Default for Config { - fn default() -> Self { - Self { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) - .expect("The default Eth1 endpoint must always be a valid URL."), - ), - deposit_contract_address: "0x0000000000000000000000000000000000000000".into(), - chain_id: DEFAULT_CHAIN_ID, - deposit_contract_deploy_block: 1, - lowest_cached_block_number: 1, - follow_distance: 128, - cache_follow_distance: None, - node_far_behind_seconds: 128 * 14, - block_cache_truncation: Some(4_096), - auto_update_interval_millis: 60_000, - blocks_per_log_query: 1_000, - max_log_requests_per_update: Some(5_000), - max_blocks_per_update: Some(8_192), - purge_cache: false, - execution_timeout_multiplier: 1, - } - } -} - -pub fn endpoint_from_config(config: &Config) -> Result { - match config.endpoint.clone() { - Eth1Endpoint::Auth { - endpoint, - jwt_path, - jwt_id, - jwt_version, - } => { - let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) - .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; - HttpJsonRpc::new_with_auth(endpoint, auth, Some(config.execution_timeout_multiplier)) - .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) - } - Eth1Endpoint::NoAuth(endpoint) => { - HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier)) - .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) - } - } -} - -/// Provides a set of Eth1 caches and async functions to update them. -/// -/// Stores the following caches: -/// -/// - Deposit cache: stores all deposit logs from the deposit contract. -/// - Block cache: stores some number of eth1 blocks. -#[derive(Clone)] -pub struct Service { - inner: Arc, -} - -impl Service { - /// Creates a new service. Does not attempt to connect to the eth1 node. 
- pub fn new(config: Config, spec: Arc) -> Result { - Ok(Self { - inner: Arc::new(Inner { - block_cache: <_>::default(), - deposit_cache: RwLock::new(DepositUpdater::new( - config.deposit_contract_deploy_block, - )), - endpoint: endpoint_from_config(&config)?, - to_finalize: RwLock::new(None), - remote_head_block: RwLock::new(None), - config: RwLock::new(config), - spec, - }), - }) - } - - pub fn chain_spec(&self) -> &Arc { - &self.inner.spec - } - - pub fn client(&self) -> &HttpJsonRpc { - &self.inner.endpoint - } - - /// Creates a new service, initializing the deposit tree from a snapshot. - pub fn from_deposit_snapshot( - config: Config, - spec: Arc, - deposit_snapshot: &DepositTreeSnapshot, - ) -> Result { - let deposit_cache = - DepositUpdater::from_snapshot(config.deposit_contract_deploy_block, deposit_snapshot) - .map_err(Error::FailedToInitializeFromSnapshot)?; - - Ok(Self { - inner: Arc::new(Inner { - block_cache: <_>::default(), - deposit_cache: RwLock::new(deposit_cache), - endpoint: endpoint_from_config(&config) - .map_err(Error::FailedToInitializeFromSnapshot)?, - to_finalize: RwLock::new(None), - remote_head_block: RwLock::new(None), - config: RwLock::new(config), - spec, - }), - }) - } - - pub fn set_to_finalize(&self, eth1_data: Option) { - *(self.inner.to_finalize.write()) = eth1_data; - } - - /// Returns the follow distance that has been shortened to accommodate for differences in the - /// spacing between blocks. - pub fn cache_follow_distance(&self) -> u64 { - self.config().cache_follow_distance() - } - - /// Return byte representation of deposit and block caches. - pub fn as_bytes(&self) -> Vec { - self.inner.as_bytes() - } - - /// Recover the deposit and block caches from encoded bytes. - pub fn from_bytes(bytes: &[u8], config: Config, spec: Arc) -> Result { - let inner = Inner::from_bytes(bytes, config, spec)?; - Ok(Self { - inner: Arc::new(inner), - }) - } - - /// Provides access to the block cache. 
- pub fn blocks(&self) -> &RwLock { - &self.inner.block_cache - } - - /// Provides access to the deposit cache. - pub fn deposits(&self) -> &RwLock { - &self.inner.deposit_cache - } - - /// Removes all blocks from the cache, except for the latest block. - /// - /// We don't remove the latest blocks so we don't lose track of the latest block. - pub fn clear_block_cache(&self) { - self.inner.block_cache.write().truncate(1) - } - - /// Drop the block cache, replacing it with an empty one. - pub fn drop_block_cache(&self) { - *(self.inner.block_cache.write()) = BlockCache::default(); - } - - /// Returns the timestamp of the earliest block in the cache (if any). - pub fn earliest_block_timestamp(&self) -> Option { - self.inner.block_cache.read().earliest_block_timestamp() - } - - /// Returns the timestamp of the latest block in the cache (if any). - pub fn latest_block_timestamp(&self) -> Option { - self.inner.block_cache.read().latest_block_timestamp() - } - - /// Returns the latest head block returned from an Eth1 node. - /// - /// ## Note - /// - /// This is the simply the head of the Eth1 chain, with no regard to follow distance or the - /// voting period start. - pub fn head_block(&self) -> Option { - self.inner.remote_head_block.read().as_ref().cloned() - } - - /// Returns the latest cached block. - pub fn latest_cached_block(&self) -> Option { - self.inner.block_cache.read().latest_block().cloned() - } - - /// Returns the lowest block number stored. - pub fn lowest_block_number(&self) -> Option { - self.inner.block_cache.read().lowest_block_number() - } - - /// Returns the highest block that is present in both the deposit and block caches. - pub fn highest_safe_block(&self) -> Option { - let block_cache = self.blocks().read().highest_block_number()?; - let deposit_cache = self.deposits().read().last_processed_block?; - - Some(std::cmp::min(block_cache, deposit_cache)) - } - - /// Returns the number of currently cached blocks. 
- pub fn block_cache_len(&self) -> usize { - self.blocks().read().len() - } - - /// Returns the number deposits available in the deposit cache. - pub fn deposit_cache_len(&self) -> usize { - self.deposits().read().cache.len() - } - - /// Returns the number of deposits with valid signatures that have been observed. - pub fn get_valid_signature_count(&self) -> Option { - let highest_safe_block = self.highest_safe_block()?; - self.deposits() - .read() - .cache - .get_valid_signature_count(highest_safe_block) - } - - /// Returns the number of deposits with valid signatures that have been observed, without - /// respecting the `highest_safe_block`. - pub fn get_raw_valid_signature_count(&self) -> Option { - let deposits = self.deposits().read(); - deposits - .cache - .get_valid_signature_count(deposits.cache.latest_block_number()) - } - - /// Returns the number of deposits with valid signatures that have been observed up to and - /// including the block at `block_number`. - /// - /// Returns `None` if the `block_number` is zero or prior to contract deployment. - pub fn get_valid_signature_count_at_block(&self, block_number: u64) -> Option { - self.deposits() - .read() - .cache - .get_valid_signature_count(block_number) - } - - /// Read the service's configuration. - pub fn config(&self) -> RwLockReadGuard { - self.inner.config.read() - } - - /// Updates the configuration in `self to be `new_config`. - /// - /// Will truncate the block cache if the new configure specifies truncation. - pub fn update_config(&self, new_config: Config) -> Result<(), String> { - let mut old_config = self.inner.config.write(); - - if new_config.deposit_contract_deploy_block != old_config.deposit_contract_deploy_block { - // This may be possible, I just haven't looked into the details to ensure it's safe. - Err("Updating deposit_contract_deploy_block is not supported".to_string()) - } else { - *old_config = new_config; - - // Prevents a locking condition when calling prune_blocks. 
- drop(old_config); - - self.inner.prune_blocks(); - - Ok(()) - } - } - - /// Set the lowest block that the block cache will store. - /// - /// Note: this block may not always be present if truncating is enabled. - pub fn set_lowest_cached_block(&self, block_number: u64) { - self.inner.config.write().lowest_cached_block_number = block_number; - } - - /// Update the deposit and block cache, returning an error if either fail. - /// - /// ## Returns - /// - /// - Ok(_) if the update was successful (the cache may or may not have been modified). - /// - Err(_) if there is an error. - /// - /// Emits logs for debugging and errors. - pub async fn update( - &self, - ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> { - let client = self.client(); - let chain_id = self.config().chain_id.clone(); - let node_far_behind_seconds = self.inner.config.read().node_far_behind_seconds; - - match endpoint_state(client, &chain_id).await { - Ok(()) => crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 1), - Err(e) => { - crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); - return Err(format!("Invalid endpoint state: {:?}", e)); - } - } - let (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache) = - get_remote_head_and_new_block_ranges(client, self, node_far_behind_seconds) - .await - .map_err(|e| format!("Failed to get remote head and new block ranges: {:?}", e))?; - - *self.inner.remote_head_block.write() = Some(remote_head_block); - - let update_deposit_cache = async { - let outcome_result = self - .update_deposit_cache(Some(new_block_numbers_deposit)) - .await; - - // Reset the `last_procesed block` to the last valid deposit's block number. - // This will ensure that the next batch of blocks fetched is immediately after - // the last cached valid deposit allowing us to recover from scenarios where - // the deposit cache gets corrupted due to invalid responses from eth1 nodes. 
- if let Err(Error::FailedToInsertDeposit(DepositCacheError::NonConsecutive { - log_index: _, - expected: _, - })) = &outcome_result - { - let mut deposit_cache = self.inner.deposit_cache.write(); - debug!( - old_block_number = deposit_cache.last_processed_block, - new_block_number = deposit_cache.cache.latest_block_number(), - "Resetting last processed block" - ); - deposit_cache.last_processed_block = - Some(deposit_cache.cache.latest_block_number()); - } - - let outcome = - outcome_result.map_err(|e| format!("Failed to update deposit cache: {:?}", e))?; - - trace!( - cached_deposits = self.inner.deposit_cache.read().cache.len(), - logs_imported = outcome.logs_imported, - last_processed_execution_block = - self.inner.deposit_cache.read().last_processed_block, - "Updated deposit cache" - ); - Ok::<_, String>(outcome) - }; - - let update_block_cache = async { - let outcome = self - .update_block_cache(Some(new_block_numbers_block_cache)) - .await - .map_err(|e| format!("Failed to update deposit contract block cache: {:?}", e))?; - - trace!( - cached_blocks = self.inner.block_cache.read().len(), - blocks_imported = outcome.blocks_imported, - head_block = outcome.head_block_number, - "Updated deposit contract block cache" - ); - Ok::<_, String>(outcome) - }; - - let (deposit_outcome, block_outcome) = - futures::try_join!(update_deposit_cache, update_block_cache)?; - - Ok((deposit_outcome, block_outcome)) - } - - /// A looping future that updates the cache, then waits `config.auto_update_interval` before - /// updating it again. - /// - /// ## Returns - /// - /// - Ok(_) if the update was successful (the cache may or may not have been modified). - /// - Err(_) if there is an error. - /// - /// Emits logs for debugging and errors. 
- pub fn auto_update(self, handle: task_executor::TaskExecutor) { - let update_interval = Duration::from_millis(self.config().auto_update_interval_millis); - - let mut interval = interval_at(Instant::now(), update_interval); - - let update_future = async move { - loop { - interval.tick().await; - self.do_update(update_interval).await.ok(); - } - }; - - handle.spawn(update_future, "eth1"); - } - - async fn do_update(&self, update_interval: Duration) -> Result<(), ()> { - let update_result = self.update().await; - match update_result { - Err(e) => error!( - retry_millis = update_interval.as_millis(), - error = e, - "Error updating deposit contract cache" - ), - Ok((deposit, block)) => debug!( - retry_millis = update_interval.as_millis(), - ?block, - ?deposit, - "Updated deposit contract cache" - ), - }; - let optional_eth1data = self.inner.to_finalize.write().take(); - if let Some(eth1data_to_finalize) = optional_eth1data { - let already_finalized = self - .inner - .deposit_cache - .read() - .cache - .finalized_deposit_count(); - let deposit_count_to_finalize = eth1data_to_finalize.deposit_count; - if deposit_count_to_finalize > already_finalized { - match self.finalize_deposits(eth1data_to_finalize) { - Err(e) => warn!( - error = ?e, - info = "this should resolve on its own", - "Failed to finalize deposit cache" - ), - Ok(()) => info!( - finalized_deposit_count = deposit_count_to_finalize, - "Successfully finalized deposit tree" - ), - } - } else { - debug!( - %already_finalized, - %deposit_count_to_finalize, - "Deposits tree already finalized" - ); - } - } - Ok(()) - } - - /// Returns the range of new block numbers to be considered for the given head type. 
- fn relevant_new_block_numbers( - &self, - remote_highest_block_number: u64, - remote_highest_block_timestamp: Option, - head_type: HeadType, - ) -> Result>, Error> { - let follow_distance = self.cache_follow_distance(); - let latest_cached_block = self.latest_cached_block(); - let next_required_block = match head_type { - HeadType::Deposit => self - .deposits() - .read() - .last_processed_block - .map(|n| n + 1) - .unwrap_or_else(|| self.config().deposit_contract_deploy_block), - HeadType::BlockCache => latest_cached_block - .as_ref() - .map(|block| block.number + 1) - .unwrap_or_else(|| self.config().lowest_cached_block_number), - }; - - relevant_block_range( - remote_highest_block_number, - remote_highest_block_timestamp, - next_required_block, - follow_distance, - latest_cached_block.as_ref(), - &self.inner.spec, - ) - } - - pub fn finalize_deposits(&self, eth1_data: Eth1Data) -> Result<(), Error> { - let eth1_block = self - .inner - .block_cache - .read() - .block_by_hash(ð1_data.block_hash) - .cloned() - .ok_or_else(|| { - Error::FailedToFinalizeDeposit(format!( - "Finalized block not found in block cache: {:?}", - eth1_data.block_hash - )) - })?; - self.inner - .deposit_cache - .write() - .cache - .finalize(eth1_block) - .map_err(|e| Error::FailedToFinalizeDeposit(format!("{:?}", e))) - } - - pub fn get_deposit_snapshot(&self) -> Option { - self.inner.deposit_cache.read().cache.get_deposit_snapshot() - } - - /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured - /// follow-distance block. - /// - /// Will process no more than `BLOCKS_PER_LOG_QUERY * MAX_LOG_REQUESTS_PER_UPDATE` blocks in a - /// single update. - /// - /// If `remote_highest_block_opt` is `Some`, use that value instead of querying `self.endpoint` - /// for the head of the eth1 chain. - /// - /// ## Resolves with - /// - /// - Ok(_) if the update was successful (the cache may or may not have been modified). - /// - Err(_) if there is an error. 
- /// - /// Emits logs for debugging and errors. - pub async fn update_deposit_cache( - &self, - new_block_numbers: Option>>, - ) -> Result { - let client = self.client(); - let deposit_contract_address = self.config().deposit_contract_address.clone(); - - let blocks_per_log_query = self.config().blocks_per_log_query; - let max_log_requests_per_update = self - .config() - .max_log_requests_per_update - .unwrap_or(usize::MAX); - - let range = { - match new_block_numbers { - Some(range) => range, - None => { - relevant_new_block_numbers_from_endpoint(client, self, HeadType::Deposit) - .await? - } - } - }; - - let block_number_chunks = if let Some(range) = range { - range - .collect::>() - .chunks(blocks_per_log_query) - .take(max_log_requests_per_update) - .map(|vec| { - let first = vec.first().cloned().unwrap_or(0); - let last = vec.last().map(|n| n + 1).unwrap_or(0); - first..last - }) - .collect::>>() - } else { - Vec::new() - }; - - let mut logs_imported: usize = 0; - let deposit_contract_address_ref: &str = &deposit_contract_address; - for block_range in block_number_chunks.into_iter() { - if block_range.is_empty() { - debug!("No new blocks to scan for logs"); - continue; - } - - /* - * Step 1. Download logs. - */ - let block_range_ref = &block_range; - let logs = client - .get_deposit_logs_in_range( - deposit_contract_address_ref, - block_range_ref.clone(), - Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), - ) - .await - .map_err(Error::GetDepositLogsFailed)?; - - /* - * Step 2. Import logs to cache. - */ - let mut cache = self.deposits().write(); - logs.iter() - .map(|raw_log| { - raw_log.to_deposit_log(self.inner.spec()).map_err(|error| { - Error::FailedToParseDepositLog { - block_range: block_range.clone(), - error, - } - }) - }) - // Return early if any of the logs cannot be parsed. - // - // This costs an additional `collect`, however it enforces that no logs are - // imported if any one of them cannot be parsed. - .collect::, _>>()? 
- .into_iter() - // Returns if a deposit is unable to be added to the cache. - // - // If this error occurs, the cache will no longer be guaranteed to hold either - // none or all of the logs for each block (i.e., they may exist _some_ logs for - // a block, but not _all_ logs for that block). This scenario can cause the - // node to choose an invalid genesis state or propose an invalid block. - .try_for_each(|deposit_log| { - if let DepositCacheInsertOutcome::Inserted = cache - .cache - .insert_log(deposit_log) - .map_err(Error::FailedToInsertDeposit)? - { - logs_imported += 1; - } - - Ok::<_, Error>(()) - })?; - - debug!(logs = logs.len(), "Imported deposit logs chunk"); - - cache.last_processed_block = Some(block_range.end.saturating_sub(1)); - - metrics::set_gauge(&metrics::DEPOSIT_CACHE_LEN, cache.cache.len() as i64); - metrics::set_gauge( - &metrics::HIGHEST_PROCESSED_DEPOSIT_BLOCK, - cache.last_processed_block.unwrap_or(0) as i64, - ); - } - - if logs_imported > 0 { - info!( - latest_block = self.inner.deposit_cache.read().cache.latest_block_number(), - total = self.deposit_cache_len(), - new = logs_imported, - "Imported deposit log(s)" - ); - } else { - debug!( - latest_block = self.inner.deposit_cache.read().cache.latest_block_number(), - total_deposits = self.deposit_cache_len(), - "No new deposits found" - ); - } - - Ok(DepositCacheUpdateOutcome { logs_imported }) - } - - /// Contacts the remote eth1 node and attempts to import all blocks up to the configured - /// follow-distance block. - /// - /// If configured, prunes the block cache after importing new blocks. - /// - /// If `remote_highest_block_opt` is `Some`, use that value instead of querying `self.endpoint` - /// for the head of the eth1 chain. - /// - /// ## Resolves with - /// - /// - Ok(_) if the update was successful (the cache may or may not have been modified). - /// - Err(_) if there is an error. - /// - /// Emits logs for debugging and errors. 
- pub async fn update_block_cache( - &self, - new_block_numbers: Option>>, - ) -> Result { - let client = self.client(); - let block_cache_truncation = self.config().block_cache_truncation; - let max_blocks_per_update = self.config().max_blocks_per_update.unwrap_or(usize::MAX); - - let range = { - match new_block_numbers { - Some(range) => range, - None => { - relevant_new_block_numbers_from_endpoint(client, self, HeadType::BlockCache) - .await? - } - } - }; - - // Map the range of required blocks into a Vec. - // - // If the required range is larger than the size of the cache, drop the exiting cache - // because it's exipred and just download enough blocks to fill the cache. - let required_block_numbers = if let Some(range) = range { - if range.start() > range.end() { - // Note: this check is not strictly necessary, however it remains to safe - // guard against any regression which may cause an underflow in a following - // subtraction operation. - return Err(Error::Internal("Range was not increasing".into())); - } else { - let range_size = range.end() - range.start(); - let max_size = block_cache_truncation - .map(|n| n as u64) - .unwrap_or_else(|| u64::MAX); - if range_size > max_size { - // If the range of required blocks is larger than `max_size`, drop all - // existing blocks and download `max_size` count of blocks. - let first_block = range.end() - max_size; - (*self.inner.block_cache.write()) = BlockCache::default(); - (first_block..=*range.end()).collect::>() - } else { - range.collect::>() - } - } - } else { - Vec::new() - }; - - // This value is used to prevent the block cache from importing a block that is not yet in - // the deposit cache. 
- let latest_in_cache = self - .inner - .deposit_cache - .read() - .last_processed_block - .unwrap_or(0); - - let required_block_numbers = required_block_numbers - .into_iter() - .filter(|x| *x <= latest_in_cache) - .take(max_blocks_per_update) - .collect::>(); - - debug!( - first = ?required_block_numbers.first(), - last = ?required_block_numbers.last(), - "Downloading execution blocks" - ); - - // Produce a stream from the list of required block numbers and return a future that - // consumes the it. - - let mut blocks_imported = 0; - for block_number in required_block_numbers { - let eth1_block = - download_eth1_block(client, self.inner.clone(), Some(block_number)).await?; - - self.inner - .block_cache - .write() - .insert_root_or_child(eth1_block) - .map_err(Error::FailedToInsertEth1Block)?; - - metrics::set_gauge( - &metrics::BLOCK_CACHE_LEN, - self.inner.block_cache.read().len() as i64, - ); - metrics::set_gauge( - &metrics::LATEST_CACHED_BLOCK_TIMESTAMP, - self.inner - .block_cache - .read() - .latest_block_timestamp() - .unwrap_or(0) as i64, - ); - - blocks_imported += 1; - } - - // Prune the block cache, preventing it from growing too large. 
- self.inner.prune_blocks(); - - metrics::set_gauge( - &metrics::BLOCK_CACHE_LEN, - self.inner.block_cache.read().len() as i64, - ); - - let block_cache = self.inner.block_cache.read(); - let latest_block_mins = block_cache - .latest_block_timestamp() - .and_then(|timestamp| { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .and_then(|now| now.checked_sub(Duration::from_secs(timestamp))) - }) - .map(|duration| format!("{} mins", duration.as_secs() / 60)) - .unwrap_or_else(|| "n/a".into()); - - if blocks_imported > 0 { - debug!( - latest_block_age = latest_block_mins, - latest_block = block_cache.highest_block_number(), - total_cached_blocks = block_cache.len(), - new = %blocks_imported, - "Imported execution block(s)" - ); - } else { - debug!( - latest_block = block_cache.highest_block_number(), - cached_blocks = block_cache.len(), - "No new execution blocks imported" - ); - } - - Ok(BlockCacheUpdateOutcome { - blocks_imported, - head_block_number: block_cache.highest_block_number(), - }) - } -} - -/// Returns the range of blocks starting from `next_required_block` that are at least -/// `follow_distance` many blocks before `remote_highest_block`. -/// Returns an error if `next_required_block > remote_highest_block + 1` which means the remote went -/// backwards. -fn relevant_block_range( - remote_highest_block_number: u64, - remote_highest_block_timestamp: Option, - next_required_block: u64, - cache_follow_distance: u64, - latest_cached_block: Option<&Eth1Block>, - spec: &ChainSpec, -) -> Result>, Error> { - // If the latest cached block is lagging the head block by more than `cache_follow_distance` - // times the expected block time then the eth1 block time is likely quite different from what we - // assumed. - // - // In order to catch up, load batches of `CATCHUP_BATCH_SIZE` until the situation rights itself. 
- // Note that we need to check this condition before the regular follow distance condition - // or we will keep downloading small numbers of blocks. - if let (Some(remote_highest_block_timestamp), Some(latest_cached_block)) = - (remote_highest_block_timestamp, latest_cached_block) - { - let lagging = latest_cached_block.timestamp - + cache_follow_distance * spec.seconds_per_eth1_block - < remote_highest_block_timestamp; - let end_block = std::cmp::max( - std::cmp::min( - remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE), - next_required_block + CATCHUP_BATCH_SIZE, - ), - remote_highest_block_number.saturating_sub(cache_follow_distance), - ); - if lagging && next_required_block <= end_block { - return Ok(Some(next_required_block..=end_block)); - } - } - - let remote_follow_block = remote_highest_block_number.saturating_sub(cache_follow_distance); - if next_required_block <= remote_follow_block { - Ok(Some(next_required_block..=remote_follow_block)) - } else if next_required_block > remote_highest_block_number + 1 { - // If this is the case, the node must have gone "backwards" in terms of it's sync - // (i.e., it's head block is lower than it was before). - // - // We assume that the `cache_follow_distance` should be sufficient to ensure this never - // happens, otherwise it is an error. - Err(Error::RemoteNotSynced { - next_required_block, - remote_highest_block: remote_highest_block_number, - cache_follow_distance, - }) - } else { - // Return an empty range. - Ok(None) - } -} - -/// Downloads the `(block, deposit_root, deposit_count)` tuple from an eth1 node for the given -/// `block_number`. -/// -/// Set `block_number_opt = None` to get the "latest" eth1 block (i.e., the head). -/// -/// Performs three async calls to an Eth1 HTTP JSON RPC endpoint. 
-async fn download_eth1_block( - endpoint: &HttpJsonRpc, - cache: Arc, - block_number_opt: Option, -) -> Result { - let deposit_root = block_number_opt.and_then(|block_number| { - cache - .deposit_cache - .read() - .cache - .get_deposit_root_from_cache(block_number) - }); - - let deposit_count = block_number_opt.and_then(|block_number| { - cache - .deposit_cache - .read() - .cache - .get_deposit_count_from_cache(block_number) - }); - - // Performs a `get_blockByNumber` call to an eth1 node. - let http_block = endpoint - .get_block( - block_number_opt - .map(BlockQuery::Number) - .unwrap_or_else(|| BlockQuery::Latest), - Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), - ) - .map_err(Error::BlockDownloadFailed) - .await?; - - Ok(Eth1Block { - hash: http_block.hash, - number: http_block.number, - timestamp: http_block.timestamp, - deposit_root, - deposit_count, - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use types::MainnetEthSpec; - - #[test] - // Ensures the default config does not panic. 
- fn default_config() { - Config::default(); - } - - #[test] - fn serde_serialize() { - let serialized = - serde_yaml::to_string(&Config::default()).expect("Should serde encode default config"); - serde_yaml::from_str::(&serialized).expect("Should serde decode default config"); - } - - #[test] - fn block_cache_size() { - let mut config = Config::default(); - - let spec = MainnetEthSpec::default_spec(); - - config.set_block_cache_truncation::(&spec); - - let len = config.block_cache_truncation.unwrap(); - - let seconds_per_voting_period = - ::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot; - let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block; - let cache_follow_distance_blocks = config.follow_distance - config.cache_follow_distance(); - - let minimum_len = eth1_blocks_per_voting_period * 2 + cache_follow_distance_blocks; - - assert!(len > minimum_len as usize); - } -} diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs deleted file mode 100644 index 48ed189259..0000000000 --- a/beacon_node/eth1/tests/test.rs +++ /dev/null @@ -1,836 +0,0 @@ -#![cfg(test)] -use environment::{Environment, EnvironmentBuilder}; -use eth1::{Config, Eth1Endpoint, Service}; -use eth1::{DepositCache, DEFAULT_CHAIN_ID}; -use eth1_test_rig::{AnvilEth1Instance, Http, Middleware, Provider}; -use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; -use logging::create_test_tracing_subscriber; -use merkle_proof::verify_merkle_proof; -use sensitive_url::SensitiveUrl; -use std::ops::Range; -use std::sync::Arc; -use std::time::Duration; -use tree_hash::TreeHash; -use types::{ - DepositData, EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, - Signature, -}; - -const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32; - -pub fn new_env() -> Environment { - create_test_tracing_subscriber(); - EnvironmentBuilder::minimal() - .multi_threaded_tokio_runtime() - .expect("should start tokio runtime") - 
.build() - .expect("should build env") -} - -fn timeout() -> Duration { - Duration::from_secs(2) -} - -fn random_deposit_data() -> DepositData { - let keypair = Keypair::random(); - - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials: Hash256::zero(), - amount: 32_000_000_000, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &MainnetEthSpec::default_spec()); - - deposit -} - -/// Blocking operation to get the deposit logs from the `deposit_contract`. -async fn blocking_deposit_logs( - client: &HttpJsonRpc, - eth1: &AnvilEth1Instance, - range: Range, -) -> Vec { - client - .get_deposit_logs_in_range(ð1.deposit_contract.address(), range, timeout()) - .await - .expect("should get logs") -} - -/// Blocking operation to get the deposit root from the `deposit_contract`. -async fn blocking_deposit_root( - client: &HttpJsonRpc, - eth1: &AnvilEth1Instance, - block_number: u64, -) -> Option { - client - .get_deposit_root(ð1.deposit_contract.address(), block_number, timeout()) - .await - .expect("should get deposit root") -} - -/// Blocking operation to get the deposit count from the `deposit_contract`. 
-async fn blocking_deposit_count( - client: &HttpJsonRpc, - eth1: &AnvilEth1Instance, - block_number: u64, -) -> Option { - client - .get_deposit_count(ð1.deposit_contract.address(), block_number, timeout()) - .await - .expect("should get deposit count") -} - -async fn get_block_number(client: &Provider) -> u64 { - client - .get_block_number() - .await - .map(|v| v.as_u64()) - .expect("should get block number") -} - -async fn new_anvil_instance() -> Result { - AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await -} - -mod eth1_cache { - use super::*; - - #[tokio::test] - async fn simple_scenario() { - create_test_tracing_subscriber(); - async { - for follow_distance in 0..3 { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let initial_block_number = get_block_number(&anvil_client).await; - - let config = Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance, - ..Config::default() - }; - let cache_follow_distance = config.cache_follow_distance(); - - let service = - Service::new(config, Arc::new(MainnetEthSpec::default_spec())).unwrap(); - - // Create some blocks and then consume them, performing the test `rounds` times. 
- for round in 0..2 { - let blocks = 4; - - let initial = if round == 0 { - initial_block_number - } else { - service - .blocks() - .read() - .highest_block_number() - .map(|n| n + cache_follow_distance) - .expect("should have a latest block after the first round") - }; - - for _ in 0..blocks { - eth1.anvil.evm_mine().await.expect("should mine block"); - } - - service - .update_deposit_cache(None) - .await - .expect("should update deposit cache"); - service - .update_block_cache(None) - .await - .expect("should update block cache"); - - service - .update_block_cache(None) - .await - .expect("should update cache when nothing has changed"); - - assert_eq!( - service - .blocks() - .read() - .highest_block_number() - .map(|n| n + cache_follow_distance), - Some(initial + blocks), - "should update {} blocks in round {} (follow {} i.e. {})", - blocks, - round, - follow_distance, - cache_follow_distance - ); - } - } - } - .await; - } - - /// Tests the case where we attempt to download more blocks than will fit in the cache. 
- - #[tokio::test] - async fn big_skip() { - create_test_tracing_subscriber(); - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let cache_len = 4; - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&anvil_client).await, - follow_distance: 0, - block_cache_truncation: Some(cache_len), - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - let blocks = cache_len * 2; - - for _ in 0..blocks { - eth1.anvil.evm_mine().await.expect("should mine block") - } - - service - .update_deposit_cache(None) - .await - .expect("should update deposit cache"); - service - .update_block_cache(None) - .await - .expect("should update block cache"); - - assert_eq!( - service.block_cache_len(), - cache_len, - "should not grow cache beyond target" - ); - } - .await; - } - - /// Tests to ensure that the cache gets pruned when doing multiple downloads smaller than the - /// cache size. 
- #[tokio::test] - async fn pruning() { - create_test_tracing_subscriber(); - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let cache_len = 4; - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&anvil_client).await, - follow_distance: 0, - block_cache_truncation: Some(cache_len), - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - for _ in 0..4u8 { - for _ in 0..cache_len / 2 { - eth1.anvil.evm_mine().await.expect("should mine block") - } - service - .update_deposit_cache(None) - .await - .expect("should update deposit cache"); - service - .update_block_cache(None) - .await - .expect("should update block cache"); - } - - assert_eq!( - service.block_cache_len(), - cache_len, - "should not grow cache beyond target" - ); - } - .await; - } - - #[tokio::test] - async fn double_update() { - create_test_tracing_subscriber(); - async { - let n = 16; - - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&anvil_client).await, - follow_distance: 0, - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - for _ in 0..n { - eth1.anvil.evm_mine().await.expect("should mine block") - } - - futures::try_join!( - service.update_deposit_cache(None), - service.update_deposit_cache(None) - ) - .expect("should perform two 
simultaneous updates of deposit cache"); - futures::try_join!( - service.update_block_cache(None), - service.update_block_cache(None) - ) - .expect("should perform two simultaneous updates of block cache"); - - assert!(service.block_cache_len() >= n, "should grow the cache"); - } - .await; - } -} - -mod deposit_tree { - - use super::*; - - #[tokio::test] - async fn updating() { - create_test_tracing_subscriber(); - async { - let n = 4; - - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let start_block = get_block_number(&anvil_client).await; - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: start_block, - follow_distance: 0, - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - for round in 0..3 { - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - } - - service - .update_deposit_cache(None) - .await - .expect("should perform update"); - - service - .update_deposit_cache(None) - .await - .expect("should perform update when nothing has changed"); - - let first = n * round; - let last = n * (round + 1); - - let (_root, local_deposits) = service - .deposits() - .read() - .cache - .get_deposits(first, last, last) - .unwrap_or_else(|_| panic!("should get deposits in round {}", round)); - - assert_eq!( - local_deposits.len(), - n as usize, - "should get the right number of deposits in round {}", - round - ); - - assert_eq!( - local_deposits - .iter() - .map(|d| d.data.clone()) - .collect::>(), - deposits.to_vec(), - "obtained deposits should match those submitted in round {}", - 
round - ); - } - } - .await; - } - - #[tokio::test] - async fn double_update() { - create_test_tracing_subscriber(); - async { - let n = 8; - - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let start_block = get_block_number(&anvil_client).await; - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: start_block, - lowest_cached_block_number: start_block, - follow_distance: 0, - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - } - - futures::try_join!( - service.update_deposit_cache(None), - service.update_deposit_cache(None) - ) - .expect("should perform two updates concurrently"); - - assert_eq!(service.deposit_cache_len(), n); - } - .await; - } - - #[tokio::test] - async fn cache_consistency() { - async { - let n = 8; - - let spec = &MainnetEthSpec::default_spec(); - - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let mut deposit_roots = vec![]; - let mut deposit_counts = vec![]; - - let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); - - // Perform deposits to the smart contract, recording it's state along the way. 
- for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - let block_number = get_block_number(&anvil_client).await; - deposit_roots.push( - blocking_deposit_root(&client, ð1, block_number) - .await - .expect("should get root if contract exists"), - ); - deposit_counts.push( - blocking_deposit_count(&client, ð1, block_number) - .await - .expect("should get count if contract exists"), - ); - } - - let mut tree = DepositCache::default(); - - // Pull all the deposit logs from the contract. - let block_number = get_block_number(&anvil_client).await; - let logs: Vec<_> = blocking_deposit_logs(&client, ð1, 0..block_number) - .await - .iter() - .map(|raw| raw.to_deposit_log(spec).expect("should parse deposit log")) - .inspect(|log| { - tree.insert_log(log.clone()) - .expect("should add consecutive logs"); - }) - .collect(); - - // Check the logs for invariants. - for i in 0..logs.len() { - let log = &logs[i]; - assert_eq!( - log.deposit_data, deposits[i], - "log {} should have correct deposit data", - i - ); - assert_eq!(log.index, i as u64, "log {} should have correct index", i); - } - - // For each deposit test some more invariants - for i in 0..n { - // Ensure the deposit count from the smart contract was as expected. - assert_eq!( - deposit_counts[i], - i as u64 + 1, - "deposit count should be accurate" - ); - - // Ensure that the root from the deposit tree matches what the contract reported. - let (root, deposits) = tree - .get_deposits(0, i as u64, deposit_counts[i]) - .expect("should get deposits"); - assert_eq!( - root, deposit_roots[i], - "tree deposit root {} should match the contract", - i - ); - - // Ensure that the deposits all prove into the root from the smart contract. 
- let deposit_root = deposit_roots[i]; - for (j, deposit) in deposits.iter().enumerate() { - assert!( - verify_merkle_proof( - deposit.data.tree_hash_root(), - &deposit.proof, - DEPOSIT_CONTRACT_TREE_DEPTH + 1, - j, - deposit_root - ), - "deposit merkle proof should prove into deposit contract root" - ) - } - } - } - .await; - } -} - -/// Tests for the base HTTP requests and response handlers. -mod http { - use super::*; - - async fn get_block(client: &HttpJsonRpc, block_number: u64) -> Block { - client - .get_block(BlockQuery::Number(block_number), timeout()) - .await - .expect("should get block number") - } - - #[tokio::test] - async fn incrementing_deposits() { - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); - - let block_number = get_block_number(&anvil_client).await; - let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; - assert_eq!(logs.len(), 0); - - let mut old_root = blocking_deposit_root(&client, ð1, block_number).await; - let mut old_block = get_block(&client, block_number).await; - let mut old_block_number = block_number; - - assert_eq!( - blocking_deposit_count(&client, ð1, block_number).await, - Some(0), - "should have deposit count zero" - ); - - for i in 1..=8 { - eth1.anvil - .increase_time(1) - .await - .expect("should be able to increase time on anvil"); - - deposit_contract - .deposit(random_deposit_data()) - .await - .expect("should perform a deposit"); - - // Check the logs. - let block_number = get_block_number(&anvil_client).await; - let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; - assert_eq!(logs.len(), i, "the number of logs should be as expected"); - - // Check the deposit count. 
- assert_eq!( - blocking_deposit_count(&client, ð1, block_number).await, - Some(i as u64), - "should have a correct deposit count" - ); - - // Check the deposit root. - let new_root = blocking_deposit_root(&client, ð1, block_number).await; - assert_ne!( - new_root, old_root, - "deposit root should change with each deposit" - ); - old_root = new_root; - - // Check the block hash. - let new_block = get_block(&client, block_number).await; - assert_ne!( - new_block.hash, old_block.hash, - "block hash should change with each deposit" - ); - - // Check to ensure the timestamp is increasing - assert!( - old_block.timestamp <= new_block.timestamp, - "block timestamp should increase" - ); - - old_block = new_block.clone(); - - // Check the block number. - assert!( - block_number > old_block_number, - "block number should increase" - ); - old_block_number = block_number; - - // Check to ensure the block root is changing - assert_ne!( - new_root, - Some(new_block.hash), - "the deposit root should be different to the block hash" - ); - } - } - .await; - } -} - -mod fast { - use super::*; - - // Adds deposits into deposit cache and matches deposit_count and deposit_root - // with the deposit count and root computed from the deposit cache. 
- #[tokio::test] - async fn deposit_cache_query() { - create_test_tracing_subscriber(); - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let now = get_block_number(&anvil_client).await; - let spec = Arc::new(MainnetEthSpec::default_spec()); - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: now, - lowest_cached_block_number: now, - follow_distance: 0, - block_cache_truncation: None, - ..Config::default() - }, - spec.clone(), - ) - .unwrap(); - let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); - let n = 10; - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - // Mine an extra block between deposits to test for corner cases - eth1.anvil.evm_mine().await.expect("should mine block"); - } - - service - .update_deposit_cache(None) - .await - .expect("should perform update"); - - assert!( - service.deposit_cache_len() >= n, - "should have imported n deposits" - ); - - for block_num in 0..=get_block_number(&anvil_client).await { - let expected_deposit_count = - blocking_deposit_count(&client, ð1, block_num).await; - let expected_deposit_root = blocking_deposit_root(&client, ð1, block_num).await; - - let deposit_count = service - .deposits() - .read() - .cache - .get_deposit_count_from_cache(block_num); - let deposit_root = service - .deposits() - .read() - .cache - .get_deposit_root_from_cache(block_num); - assert_eq!( - expected_deposit_count, deposit_count, - "deposit count from cache should match queried" - ); - assert_eq!( - expected_deposit_root, deposit_root, - 
"deposit root from cache should match queried" - ); - } - } - .await; - } -} - -mod persist { - use super::*; - #[tokio::test] - async fn test_persist_caches() { - create_test_tracing_subscriber(); - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let now = get_block_number(&anvil_client).await; - let config = Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: now, - lowest_cached_block_number: now, - follow_distance: 0, - block_cache_truncation: None, - ..Config::default() - }; - let service = - Service::new(config.clone(), Arc::new(MainnetEthSpec::default_spec())).unwrap(); - let n = 10; - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - } - - service - .update_deposit_cache(None) - .await - .expect("should perform update"); - - assert!( - service.deposit_cache_len() >= n, - "should have imported n deposits" - ); - - let deposit_count = service.deposit_cache_len(); - - service - .update_block_cache(None) - .await - .expect("should perform update"); - - assert!( - service.block_cache_len() >= n, - "should have imported n eth1 blocks" - ); - - let block_count = service.block_cache_len(); - - let eth1_bytes = service.as_bytes(); - - // Drop service and recover from bytes - drop(service); - - let recovered_service = Service::from_bytes( - ð1_bytes, - config, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - assert_eq!( - recovered_service.block_cache_len(), - block_count, - "Should have equal cached blocks as before recovery" - ); - assert_eq!( - recovered_service.deposit_cache_len(), - deposit_count, - "Should have equal 
cached deposits as before recovery" - ); - } - .await; - } -} diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 4bfee223ff..3c82e6251b 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -380,7 +380,7 @@ pub enum GetPayloadResponseType { } impl GetPayloadResponse { - pub fn execution_payload_ref(&self) -> ExecutionPayloadRef { + pub fn execution_payload_ref(&self) -> ExecutionPayloadRef<'_, E> { self.to_ref().into() } } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c79036ba61..300713fdca 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -727,7 +727,7 @@ impl HttpJsonRpc { pub async fn get_blobs_v2( &self, versioned_hashes: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let params = json!([versioned_hashes]); self.rpc_request( diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 4761c47d41..cf751138d6 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1864,7 +1864,7 @@ impl ExecutionLayer { pub async fn get_blobs_v2( &self, query: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let capabilities = self.get_engine_capabilities(None).await?; if capabilities.get_blobs_v2 { diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index b057abe887..e01b8de9e3 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -142,6 +142,7 @@ pub struct ExecutionBlockGenerator { pub pending_payloads: HashMap>, pub next_payload_id: u64, pub payload_ids: HashMap>, + min_blobs_count: usize, /* * 
Post-merge fork triggers */ @@ -188,6 +189,7 @@ impl ExecutionBlockGenerator { pending_payloads: <_>::default(), next_payload_id: 0, payload_ids: <_>::default(), + min_blobs_count: 0, shanghai_time, cancun_time, prague_time, @@ -318,6 +320,10 @@ impl ExecutionBlockGenerator { Ok(()) } + pub fn set_min_blob_count(&mut self, count: usize) { + self.min_blobs_count = count; + } + pub fn insert_pow_block(&mut self, block_number: u64) -> Result<(), String> { if let Some(finalized_block_hash) = self.finalized_block_hash { return Err(format!( @@ -702,8 +708,10 @@ impl ExecutionBlockGenerator { if fork_name.deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); - let max_blobs = self.spec.max_blobs_per_block_by_fork(fork_name) as usize; - let num_blobs = rng.gen::() % (max_blobs + 1); + // TODO(EIP-7892): see FIXME below + // FIXME: this will break with BPO forks. This function needs to calculate the epoch based on block timestamp.. + let max_blobs = self.spec.max_blobs_per_block_within_fork(fork_name) as usize; + let num_blobs = rng.gen_range(self.min_blobs_count..=max_blobs); let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { execution_payload diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index f752b888a7..8f6f3516fc 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -5,21 +5,12 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -environment = { workspace = true } -eth1 = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } -futures = { workspace = true } int_to_bytes = { workspace = true } merkle_proof = { workspace = true } rayon = { workspace = true } state_processing = { workspace = true } -tokio = { workspace = true } tracing = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } - -[dev-dependencies] -eth1_test_rig 
= { workspace = true } -logging = { workspace = true } -sensitive_url = { workspace = true } diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs deleted file mode 100644 index dede96512c..0000000000 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ /dev/null @@ -1,461 +0,0 @@ -pub use crate::common::genesis_deposits; -pub use eth1::Config as Eth1Config; - -use eth1::{DepositLog, Eth1Block, Service as Eth1Service}; -use state_processing::{ - eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, - per_block_processing::process_operations::apply_deposit, process_activations, -}; -use std::sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, -}; -use std::time::Duration; -use tokio::time::sleep; -use tracing::{debug, error, info, trace}; -use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, FixedBytesExtended, Hash256}; - -/// The number of blocks that are pulled per request whilst waiting for genesis. -const BLOCKS_PER_GENESIS_POLL: usize = 99; - -/// Stats about the eth1 genesis process. -pub struct Statistics { - highest_processed_block: AtomicU64, - active_validator_count: AtomicUsize, - total_deposit_count: AtomicUsize, - latest_timestamp: AtomicU64, -} - -/// Provides a service that connects to some Eth1 HTTP JSON-RPC endpoint and maintains a cache of -/// eth1 blocks and deposits, listening for the eth1 block that triggers eth2 genesis and returning -/// the genesis `BeaconState`. -/// -/// Is a wrapper around the `Service` struct of the `eth1` crate. -#[derive(Clone)] -pub struct Eth1GenesisService { - /// The underlying service. Access to this object is only required for testing and diagnosis. - pub eth1_service: Eth1Service, - /// Statistics about genesis progress. - stats: Arc, -} - -impl Eth1GenesisService { - /// Creates a new service. Does not attempt to connect to the Eth1 node. 
- /// - /// Modifies the given `config` to make it more suitable to the task of listening to genesis. - pub fn new(config: Eth1Config, spec: Arc) -> Result { - let config = Eth1Config { - // Truncating the block cache makes searching for genesis more - // complicated. - block_cache_truncation: None, - // Scan large ranges of blocks when awaiting genesis. - blocks_per_log_query: 1_000, - // Only perform a few log requests each time the eth1 node is polled. - // - // For small testnets this makes finding genesis much faster, - // as it usually happens within 1,000 blocks. - max_log_requests_per_update: Some(5), - // Only perform a few logs requests each time the eth1 node is polled. - // - // For small testnets, this is much faster as they do not have - // a `MIN_GENESIS_SECONDS`, so after `MIN_GENESIS_VALIDATOR_COUNT` - // has been reached only a single block needs to be read. - max_blocks_per_update: Some(BLOCKS_PER_GENESIS_POLL), - ..config - }; - - Ok(Self { - eth1_service: Eth1Service::new(config, spec) - .map_err(|e| format!("Failed to create eth1 service: {:?}", e))?, - stats: Arc::new(Statistics { - highest_processed_block: AtomicU64::new(0), - active_validator_count: AtomicUsize::new(0), - total_deposit_count: AtomicUsize::new(0), - latest_timestamp: AtomicU64::new(0), - }), - }) - } - - /// Returns the first eth1 block that has enough deposits that it's a (potentially invalid) - /// candidate for genesis. - fn first_candidate_eth1_block(&self, min_genesis_active_validator_count: usize) -> Option { - if self.eth1_service.deposit_cache_len() < min_genesis_active_validator_count { - None - } else { - self.eth1_service - .deposits() - .read() - .cache - .get_log(min_genesis_active_validator_count.saturating_sub(1)) - .map(|log| log.block_number) - } - } - - /// Scans the Eth1 chain, returning a genesis state once it has been discovered. - /// - /// ## Returns - /// - /// - `Ok(state)` once the canonical eth2 genesis state has been discovered. 
- /// - `Err(e)` if there is some internal error during updates. - pub async fn wait_for_genesis_state( - &self, - update_interval: Duration, - ) -> Result, String> { - let eth1_service = &self.eth1_service; - let spec = eth1_service.chain_spec(); - - let mut sync_blocks = false; - let mut highest_processed_block = None; - - info!("Importing eth1 deposit logs"); - - loop { - let update_result = eth1_service - .update_deposit_cache(None) - .await - .map_err(|e| format!("{:?}", e)); - - if let Err(e) = update_result { - error!(error = e, "Failed to update eth1 deposit cache") - } - - self.stats - .total_deposit_count - .store(eth1_service.deposit_cache_len(), Ordering::Relaxed); - - if !sync_blocks { - if let Some(viable_eth1_block) = self - .first_candidate_eth1_block(spec.min_genesis_active_validator_count as usize) - { - info!("Importing eth1 blocks"); - self.eth1_service.set_lowest_cached_block(viable_eth1_block); - sync_blocks = true - } else { - info!( - min_genesis_active_validators = spec.min_genesis_active_validator_count, - total_deposits = eth1_service.deposit_cache_len(), - valid_deposits = eth1_service.get_raw_valid_signature_count(), - "Waiting for more deposits" - ); - - sleep(update_interval).await; - - continue; - } - } - - // Download new eth1 blocks into the cache. - let blocks_imported = match eth1_service.update_block_cache(None).await { - Ok(outcome) => { - debug!( - latest_block_timestamp = eth1_service.latest_block_timestamp(), - cache_head = eth1_service.highest_safe_block(), - count = outcome.blocks_imported, - "Imported eth1 blocks" - ); - outcome.blocks_imported - } - Err(e) => { - error!( - error = ?e, - "Failed to update eth1 block cache" - ); - 0 - } - }; - - // Scan the new eth1 blocks, searching for genesis. - if let Some(genesis_state) = - self.scan_new_blocks::(&mut highest_processed_block, spec)? 
- { - info!( - genesis_validators = genesis_state - .get_active_validator_indices(E::genesis_epoch(), spec) - .map_err(|e| format!("Genesis validators error: {:?}", e))? - .len(), - genesis_time = genesis_state.genesis_time(), - "Genesis ceremony complete" - ); - break Ok(genesis_state); - } - - // Drop all the scanned blocks as they are no longer required. - eth1_service.clear_block_cache(); - - // Load some statistics from the atomics. - let active_validator_count = self.stats.active_validator_count.load(Ordering::Relaxed); - let total_deposit_count = self.stats.total_deposit_count.load(Ordering::Relaxed); - let latest_timestamp = self.stats.latest_timestamp.load(Ordering::Relaxed); - - // Perform some logging. - if timestamp_can_trigger_genesis(latest_timestamp, spec)? { - // Indicate that we are awaiting adequate active validators. - if (active_validator_count as u64) < spec.min_genesis_active_validator_count { - info!( - min_genesis_active_validators = spec.min_genesis_active_validator_count, - active_validators = active_validator_count, - total_deposits = total_deposit_count, - valid_deposits = eth1_service.get_valid_signature_count().unwrap_or(0), - "Waiting for more validators" - ); - } - } else { - info!( - genesis_delay = spec.genesis_delay, - genesis_time = spec.min_genesis_time, - latest_eth1_timestamp = latest_timestamp, - "Waiting for adequate eth1 timestamp" - ); - } - - // If we imported the full number of blocks, poll again in a short amount of time. - // - // We assume that if we imported a large chunk of blocks then we're some distance from - // the head and we should sync faster. - if blocks_imported >= BLOCKS_PER_GENESIS_POLL { - sleep(Duration::from_millis(50)).await; - } else { - sleep(update_interval).await; - } - } - } - - /// Processes any new blocks that have appeared since this function was last run. - /// - /// Blocks are always tested in increasing order, starting with the lowest unknown block - /// number in the cache. 
- /// - /// ## Returns - /// - /// - `Ok(Some(eth1_block))` if a previously-unprocessed block would trigger Eth2 genesis. - /// - `Ok(None)` if none of the new blocks would trigger genesis, or there were no new blocks. - /// - `Err(_)` if there was some internal error. - fn scan_new_blocks( - &self, - highest_processed_block: &mut Option, - spec: &ChainSpec, - ) -> Result>, String> { - let eth1_service = &self.eth1_service; - - for block in eth1_service.blocks().read().iter() { - // It's possible that the block and deposit caches aren't synced. Ignore any blocks - // which are not safe for both caches. - // - // Don't update the highest processed block since we want to come back and process this - // again later. - if eth1_service - .highest_safe_block() - .is_none_or(|n| block.number > n) - { - continue; - } - - // Ignore any block that has already been processed or update the highest processed - // block. - if highest_processed_block.is_some_and(|highest| highest >= block.number) { - continue; - } else { - self.stats - .highest_processed_block - .store(block.number, Ordering::Relaxed); - self.stats - .latest_timestamp - .store(block.timestamp, Ordering::Relaxed); - - *highest_processed_block = Some(block.number) - } - - // Ignore any block with an insufficient timestamp. - if !timestamp_can_trigger_genesis(block.timestamp, spec)? 
{ - trace!( - genesis_delay = spec.genesis_delay, - min_genesis_time = spec.min_genesis_time, - eth1_block_timestamp = block.timestamp, - eth1_block_number = block.number, - "Insufficient block timestamp" - ); - continue; - } - - let valid_signature_count = eth1_service - .get_valid_signature_count_at_block(block.number) - .unwrap_or(0); - if (valid_signature_count as u64) < spec.min_genesis_active_validator_count { - trace!( - genesis_delay = spec.genesis_delay, - valid_signature_count = valid_signature_count, - min_validator_count = spec.min_genesis_active_validator_count, - eth1_block_number = block.number, - "Insufficient valid signatures" - ); - continue; - } - - // Generate a potential beacon state for this eth1 block. - // - // Note: this state is fully valid, some fields have been bypassed to make verification - // faster. - let state = self.cheap_state_at_eth1_block::(block, spec)?; - let active_validator_count = state - .get_active_validator_indices(E::genesis_epoch(), spec) - .map_err(|e| format!("Genesis validators error: {:?}", e))? - .len(); - - self.stats - .active_validator_count - .store(active_validator_count, Ordering::Relaxed); - - if is_valid_genesis_state(&state, spec) { - let genesis_state = self - .genesis_from_eth1_block(block.clone(), spec) - .map_err(|e| format!("Failed to generate valid genesis state : {}", e))?; - - return Ok(Some(genesis_state)); - } else { - trace!( - min_genesis_active_validator_count = - format!("{}", spec.min_genesis_active_validator_count), - active_validators = active_validator_count, - eth1_block_number = block.number, - "Insufficient active validators" - ); - } - } - - Ok(None) - } - - /// Produces an eth2 genesis `BeaconState` from the given `eth1_block`. The caller should have - /// verified that `eth1_block` produces a valid genesis state. - /// - /// ## Returns - /// - /// - `Ok(genesis_state)`: if all went well. 
- /// - `Err(e)`: if the given `eth1_block` was not a viable block to trigger genesis or there was - /// an internal error. - fn genesis_from_eth1_block( - &self, - eth1_block: Eth1Block, - spec: &ChainSpec, - ) -> Result, String> { - let deposit_logs = self - .eth1_service - .deposits() - .read() - .cache - .iter() - .take_while(|log| log.block_number <= eth1_block.number) - .map(|log| log.deposit_data.clone()) - .collect::>(); - - let genesis_state = initialize_beacon_state_from_eth1( - eth1_block.hash, - eth1_block.timestamp, - genesis_deposits(deposit_logs, spec)?, - None, - spec, - ) - .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; - - if is_valid_genesis_state(&genesis_state, spec) { - Ok(genesis_state) - } else { - Err("Generated state was not valid.".to_string()) - } - } - - /// Generates an incomplete `BeaconState` for some `eth1_block` that can be used for checking - /// to see if that `eth1_block` triggers eth2 genesis. - /// - /// ## Notes - /// - /// The returned `BeaconState` should **not** be used as the genesis state, it is - /// incomplete. - fn cheap_state_at_eth1_block( - &self, - eth1_block: &Eth1Block, - spec: &ChainSpec, - ) -> Result, String> { - let genesis_time = eth2_genesis_time(eth1_block.timestamp, spec) - .map_err(|e| format!("Unable to set genesis time: {:?}", e))?; - - let mut state: BeaconState = BeaconState::new( - genesis_time, - Eth1Data { - block_hash: Hash256::zero(), - deposit_root: Hash256::zero(), - deposit_count: 0, - }, - spec, - ); - - self.deposit_logs_at_block(eth1_block.number) - .iter() - .map(|deposit_log| Deposit { - // Generate a bogus proof. - // - // The deposits are coming directly from our own deposit tree to there's no need to - // make proofs about their inclusion in it. 
- proof: vec![Hash256::zero(); spec.deposit_contract_tree_depth as usize].into(), - data: deposit_log.deposit_data.clone(), - }) - .try_for_each(|deposit| { - // Skip proof verification (see comment about bogus proof generation). - const PROOF_VERIFICATION: bool = false; - - // Note: presently all the signatures are verified each time this function is - // run. - // - // It would be more efficient to pre-verify signatures, filter out the invalid - // ones and disable verification for `process_deposit`. - // - // Such an optimization would only be useful in a scenario where `MIN_GENESIS_TIME` - // is reached _prior_ to `MIN_ACTIVE_VALIDATOR_COUNT`. I suspect this won't be the - // case for mainnet, so we defer this optimization. - let Deposit { proof, data } = deposit; - let proof = if PROOF_VERIFICATION { - Some(proof) - } else { - None - }; - - apply_deposit(&mut state, data, proof, true, spec) - .map_err(|e| format!("Error whilst processing deposit: {:?}", e)) - })?; - - process_activations(&mut state, spec) - .map_err(|e| format!("Error whilst processing activations: {:?}", e))?; - - Ok(state) - } - - /// Returns all deposit logs included in `block_number` and all prior blocks. - fn deposit_logs_at_block(&self, block_number: u64) -> Vec { - self.eth1_service - .deposits() - .read() - .cache - .iter() - .take_while(|log| log.block_number <= block_number) - .cloned() - .collect() - } - - /// Returns statistics about eth1 genesis. - pub fn statistics(&self) -> &Statistics { - &self.stats - } - - /// Returns the `Service` contained in `self`. - pub fn into_core_service(self) -> Eth1Service { - self.eth1_service - } -} - -/// Returns `false` for a timestamp that would result in a genesis time that is earlier than -/// `MIN_GENESIS_TIME`. 
-fn timestamp_can_trigger_genesis(timestamp: u64, spec: &ChainSpec) -> Result { - eth2_genesis_time(timestamp, spec) - .map(|t| t >= spec.min_genesis_time) - .map_err(|e| format!("Arith error when during genesis calculation: {:?}", e)) -} diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 1fba64aafb..35f0b0e380 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -1,10 +1,6 @@ mod common; -mod eth1_genesis_service; mod interop; -pub use eth1::Config as Eth1Config; -pub use eth1::Eth1Endpoint; -pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use interop::{ bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1, InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH, diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs deleted file mode 100644 index b5710e50fd..0000000000 --- a/beacon_node/genesis/tests/tests.rs +++ /dev/null @@ -1,107 +0,0 @@ -#![cfg(test)] -use environment::{Environment, EnvironmentBuilder}; -use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; -use eth1_test_rig::{AnvilEth1Instance, DelayThenDeposit, Middleware}; -use genesis::{Eth1Config, Eth1GenesisService}; -use logging::create_test_tracing_subscriber; -use sensitive_url::SensitiveUrl; -use state_processing::is_valid_genesis_state; -use std::sync::Arc; -use std::time::Duration; -use types::{ - test_utils::generate_deterministic_keypair, FixedBytesExtended, Hash256, MinimalEthSpec, -}; - -pub fn new_env() -> Environment { - create_test_tracing_subscriber(); - EnvironmentBuilder::minimal() - .multi_threaded_tokio_runtime() - .expect("should start tokio runtime") - .build() - .expect("should build env") -} - -#[test] -fn basic() { - let env = new_env(); - let mut spec = (*env.eth2_config().spec).clone(); - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = 8; - let spec = Arc::new(spec); - - env.runtime().block_on(async { - let eth1 = 
AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()) - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let client = eth1.json_rpc_client(); - - let now = client - .get_block_number() - .await - .map(|v| v.as_u64()) - .expect("should get block number"); - - let service = Eth1GenesisService::new( - Eth1Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: now, - lowest_cached_block_number: now, - follow_distance: 0, - block_cache_truncation: None, - ..Eth1Config::default() - }, - spec.clone(), - ) - .unwrap(); - - // NOTE: this test is sensitive to the response speed of the external web3 server. If - // you're experiencing failures, try increasing the update_interval. - let update_interval = Duration::from_millis(500); - - let deposits = (0..spec.min_genesis_active_validator_count + 2) - .map(|i| { - deposit_contract.deposit_helper::( - generate_deterministic_keypair(i as usize), - Hash256::from_low_u64_le(i), - 32_000_000_000, - ) - }) - .map(|deposit| DelayThenDeposit { - delay: Duration::from_secs(0), - deposit, - }) - .collect::>(); - - let deposit_future = deposit_contract.deposit_multiple(deposits); - - let wait_future = service.wait_for_genesis_state::(update_interval); - - let state = futures::try_join!(deposit_future, wait_future) - .map(|(_, state)| state) - .expect("should finish waiting for genesis"); - - // Note: using anvil these deposits are 1-per-block, therefore we know there should only be - // the minimum number of validators. 
- assert_eq!( - state.validators().len(), - spec.min_genesis_active_validator_count as usize, - "should have expected validator count" - ); - - assert!(state.genesis_time() > 0, "should have some genesis time"); - - assert!( - is_valid_genesis_state(&state, &spec), - "should be valid genesis state" - ); - - assert!( - is_valid_genesis_state(&state, &spec), - "should be valid genesis state" - ); - }); -} diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index afc68ad96d..781a4cfa44 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -12,7 +12,6 @@ bs58 = "0.4.0" bytes = { workspace = true } directory = { workspace = true } either = { workspace = true } -eth1 = { workspace = true } eth2 = { workspace = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index cdef1521ec..e33de25470 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,14 +1,16 @@ +use crate::version::inconsistent_fork_rejection; use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlobIndicesQuery; use eth2::types::BlockId as CoreBlockId; +use eth2::types::DataColumnIndicesQuery; use std::fmt; use std::str::FromStr; use std::sync::Arc; use types::{ - BlobSidecarList, EthSpec, FixedBytesExtended, Hash256, SignedBeaconBlock, - SignedBlindedBeaconBlock, Slot, + BlobSidecarList, DataColumnSidecarList, EthSpec, FixedBytesExtended, ForkName, Hash256, + SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, }; use warp::Rejection; @@ -19,6 +21,13 @@ pub struct BlockId(pub CoreBlockId); type Finalized = bool; +type DataColumnsResponse = ( + DataColumnSidecarList<::EthSpec>, + ForkName, + 
ExecutionOptimistic, + Finalized, +); + impl BlockId { pub fn from_slot(slot: Slot) -> Self { Self(CoreBlockId::Slot(slot)) @@ -260,6 +269,47 @@ impl BlockId { } } + pub fn get_data_columns( + &self, + query: DataColumnIndicesQuery, + chain: &BeaconChain, + ) -> Result, Rejection> { + let (root, execution_optimistic, finalized) = self.root(chain)?; + let block = BlockId::blinded_block_by_root(&root, chain)?.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("beacon block with root {}", root)) + })?; + + if !chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + return Err(warp_utils::reject::custom_bad_request( + "block is pre-Fulu and has no data columns".to_string(), + )); + } + + let data_column_sidecars = if let Some(indices) = query.indices { + indices + .iter() + .filter_map(|index| chain.get_data_column(&root, index).transpose()) + .collect::, _>>() + .map_err(warp_utils::reject::unhandled_error)? + } else { + chain + .get_data_columns(&root) + .map_err(warp_utils::reject::unhandled_error)? 
+ .unwrap_or_default() + }; + + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + + Ok(( + data_column_sidecars, + fork_name, + execution_optimistic, + finalized, + )) + } + #[allow(clippy::type_complexity)] pub fn get_blinded_block_and_blob_list_filtered( &self, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 2eaa33a964..a627fb0353 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -40,16 +40,16 @@ use beacon_chain::{ validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, }; -use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend}; +use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; use builder_states::get_next_withdrawals; use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; -use either::Either; use eth2::types::{ self as api_types, BroadcastValidation, ContextDeserialize, EndpointVersion, ForkChoice, - ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, ValidatorBalancesRequestBody, - ValidatorId, ValidatorStatus, ValidatorsRequestBody, + ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, StateId as CoreStateId, + ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, ValidatorStatus, + ValidatorsRequestBody, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; @@ -64,7 +64,6 @@ pub use publish_blocks::{ publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock, }; use serde::{Deserialize, Serialize}; -use serde_json::Value; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; @@ -87,13 +86,13 @@ use tokio_stream::{ StreamExt, }; use tracing::{debug, error, info, warn}; -use types::AttestationData; use types::{ - Attestation, AttestationShufflingId, AttesterSlashing, 
BeaconStateError, ChainSpec, Checkpoint, - CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, ProposerPreparationData, - ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, - SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, + Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, + ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, + ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, + SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, + SignedValidatorRegistrationData, SignedVoluntaryExit, SingleAttestation, Slot, + SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -132,8 +131,6 @@ pub struct Context { pub network_senders: Option>, pub network_globals: Option>>, pub beacon_processor_send: Option>, - pub beacon_processor_reprocess_send: Option>, - pub eth1_service: Option, pub sse_logging_components: Option, } @@ -217,7 +214,6 @@ pub fn prometheus_metrics() -> warp::filters::log::Log( } }); - // Create a `warp` filter that provides access to the Eth1 service. - let inner_ctx = ctx.clone(); - let eth1_service_filter = warp::any() - .map(move || inner_ctx.eth1_service.clone()) - .and_then(|eth1_service| async move { - match eth1_service { - Some(eth1_service) => Ok(eth1_service), - None => Err(warp_utils::reject::custom_not_found( - "The Eth1 service is not started. Use --eth1 on the CLI.".to_string(), - )), - } - }); - // Create a `warp` filter that rejects requests whilst the node is syncing. 
let not_while_syncing_filter = warp::any() @@ -556,11 +539,6 @@ pub fn serve( .filter(|_| config.enable_beacon_processor); let task_spawner_filter = warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone())); - let beacon_processor_reprocess_send = ctx - .beacon_processor_reprocess_send - .clone() - .filter(|_| config.enable_beacon_processor); - let reprocess_send_filter = warp::any().map(move || beacon_processor_reprocess_send.clone()); let duplicate_block_status_code = ctx.config.duplicate_block_status_code; @@ -725,6 +703,34 @@ pub fn serve( }, ); + // POST beacon/states/{state_id}/validator_identities + let post_beacon_state_validator_identities = beacon_states_path + .clone() + .and(warp::path("validator_identities")) + .and(warp::path::end()) + .and(warp_utils::json::json_no_body()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorIdentitiesRequestBody| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. 
+ let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + crate::validators::get_beacon_state_validator_identities( + state_id, + chain, + Some(&query.ids), + ) + }) + }, + ); + // GET beacon/states/{state_id}/validators?id,status let get_beacon_state_validators = beacon_states_path .clone() @@ -1981,74 +1987,25 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()); - let post_beacon_pool_attestations_v1 = beacon_pool_path - .clone() - .and(warp::path("attestations")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .and(reprocess_send_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - attestations: Vec>, - network_tx: UnboundedSender>, - reprocess_tx: Option>| async move { - let attestations = attestations.into_iter().map(Either::Left).collect(); - let result = crate::publish_attestations::publish_attestations( - task_spawner, - chain, - attestations, - network_tx, - reprocess_tx, - ) - .await - .map(|()| warp::reply::json(&())); - convert_rejection(result).await - }, - ); - let post_beacon_pool_attestations_v2 = beacon_pool_path_v2 .clone() .and(warp::path("attestations")) .and(warp::path::end()) - .and(warp_utils::json::json::()) + .and(warp_utils::json::json::>()) .and(optional_consensus_version_header_filter) .and(network_tx_filter.clone()) - .and(reprocess_send_filter.clone()) .then( |task_spawner: TaskSpawner, chain: Arc>, - payload: Value, - fork_name: Option, - network_tx: UnboundedSender>, - reprocess_tx: Option>| async move { - let attestations = - match crate::publish_attestations::deserialize_attestation_payload::( - payload, fork_name, - ) { - Ok(attestations) => attestations, - Err(err) => { - warn!( - error = ?err, - "Unable to deserialize attestation POST request" - ); - return warp::reply::with_status( - warp::reply::json( - &"Unable to 
deserialize request body".to_string(), - ), - eth2::StatusCode::BAD_REQUEST, - ) - .into_response(); - } - }; - + attestations: Vec, + _fork_name: Option, + network_tx: UnboundedSender>| async move { let result = crate::publish_attestations::publish_attestations( task_spawner, chain, attestations, network_tx, - reprocess_tx, + true, ) .await .map(|()| warp::reply::json(&())); @@ -2453,56 +2410,6 @@ pub fn serve( }, ); - // GET beacon/deposit_snapshot - let get_beacon_deposit_snapshot = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("deposit_snapshot")) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .and(task_spawner_filter.clone()) - .and(eth1_service_filter.clone()) - .then( - |accept_header: Option, - task_spawner: TaskSpawner, - eth1_service: eth1::Service| { - task_spawner.blocking_response_task(Priority::P1, move || match accept_header { - Some(api_types::Accept::Ssz) => eth1_service - .get_deposit_snapshot() - .map(|snapshot| { - Response::builder() - .status(200) - .body(snapshot.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }) - }) - .unwrap_or_else(|| { - Response::builder() - .status(503) - .body(Vec::new().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }) - }), - _ => { - let snapshot = eth1_service.get_deposit_snapshot(); - Ok( - warp::reply::json(&api_types::GenericResponse::from(snapshot)) - .into_response(), - ) - } - }) - }, - ); - let beacon_rewards_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("rewards")) @@ -2883,6 +2790,55 @@ pub fn serve( * debug */ + // GET debug/beacon/data_column_sidecars/{block_id} + let get_debug_data_column_sidecars = eth_v1 + .and(warp::path("debug")) + .and(warp::path("beacon")) + 
.and(warp::path("data_column_sidecars")) + .and(block_id_or_err) + .and(warp::path::end()) + .and(multi_key_query::()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) + .then( + |block_id: BlockId, + indices_res: Result, + task_spawner: TaskSpawner, + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let indices = indices_res?; + let (data_columns, fork_name, execution_optimistic, finalized) = + block_id.get_data_columns(indices, &chain)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(data_columns.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + &data_columns, + )?; + Ok(warp::reply::json(&res).into_response()) + } + } + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ); + // GET debug/beacon/states/{state_id} let get_debug_beacon_states = any_version .and(warp::path("debug")) @@ -3765,15 +3721,17 @@ pub fn serve( .and(warp::path::end()) .and(warp_utils::json::json()) .and(validator_subscription_tx_filter.clone()) + .and(network_tx_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |subscriptions: Vec, + |committee_subscriptions: Vec, validator_subscription_tx: Sender, + network_tx: UnboundedSender>, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { - let subscriptions: std::collections::BTreeSet<_> = subscriptions + let subscriptions: std::collections::BTreeSet<_> = committee_subscriptions .iter() .map(|subscription| { chain @@ -3788,6 
+3746,7 @@ pub fn serve( } }) .collect(); + let message = ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; if let Err(e) = validator_subscription_tx.try_send(message) { @@ -3802,6 +3761,42 @@ pub fn serve( )); } + if chain.spec.is_peer_das_scheduled() { + let (finalized_beacon_state, _, _) = + StateId(CoreStateId::Finalized).state(&chain)?; + let validators_and_balances = committee_subscriptions + .iter() + .filter_map(|subscription| { + if let Ok(effective_balance) = finalized_beacon_state + .get_effective_balance(subscription.validator_index as usize) + { + Some((subscription.validator_index as usize, effective_balance)) + } else { + None + } + }) + .collect::>(); + + let current_slot = + chain.slot().map_err(warp_utils::reject::unhandled_error)?; + if let Some(cgc_change) = chain + .data_availability_checker + .custody_context() + .register_validators::( + validators_and_balances, + current_slot, + &chain.spec, + ) { + network_tx.send(NetworkMessage::CustodyCountChanged { + new_custody_group_count: cgc_change.new_custody_group_count, + sampling_count: cgc_change.sampling_count, + }).unwrap_or_else(|e| { + debug!(error = %e, "Could not send message to the network service. \ + Likely shutdown") + }); + } + } + Ok(()) }) }, @@ -4505,105 +4500,17 @@ pub fn serve( }, ); - // GET lighthouse/eth1/syncing - let get_lighthouse_eth1_syncing = warp::path("lighthouse") - .and(warp::path("eth1")) - .and(warp::path("syncing")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let current_slot_opt = chain.slot().ok(); - - chain - .eth1_chain - .as_ref() - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "Eth1 sync is disabled. 
See the --eth1 CLI flag.".to_string(), - ) - }) - .and_then(|eth1| { - eth1.sync_status(chain.genesis_time, current_slot_opt, &chain.spec) - .ok_or_else(|| { - warp_utils::reject::custom_server_error( - "Unable to determine Eth1 sync status".to_string(), - ) - }) - }) - .map(api_types::GenericResponse::from) - }) - }, - ); - - // GET lighthouse/eth1/block_cache - let get_lighthouse_eth1_block_cache = warp::path("lighthouse") - .and(warp::path("eth1")) - .and(warp::path("block_cache")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(eth1_service_filter.clone()) - .then( - |task_spawner: TaskSpawner, eth1_service: eth1::Service| { - task_spawner.blocking_json_task(Priority::P1, move || { - Ok(api_types::GenericResponse::from( - eth1_service - .blocks() - .read() - .iter() - .cloned() - .collect::>(), - )) - }) - }, - ); - - // GET lighthouse/eth1/deposit_cache - let get_lighthouse_eth1_deposit_cache = warp::path("lighthouse") - .and(warp::path("eth1")) - .and(warp::path("deposit_cache")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(eth1_service_filter) - .then( - |task_spawner: TaskSpawner, eth1_service: eth1::Service| { - task_spawner.blocking_json_task(Priority::P1, move || { - Ok(api_types::GenericResponse::from( - eth1_service - .deposits() - .read() - .cache - .iter() - .cloned() - .collect::>(), - )) - }) - }, - ); - // GET lighthouse/staking let get_lighthouse_staking = warp::path("lighthouse") .and(warp::path("staking")) .and(warp::path::end()) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - if chain.eth1_chain.is_some() { - Ok(()) - } else { - Err(warp_utils::reject::custom_not_found( - "staking is not enabled, \ - see the --staking CLI flag" - .to_string(), - )) - } - }) - }, - ); + .then(|task_spawner: TaskSpawner| { + // This API is fairly useless since we abolished the distinction 
between staking and + // non-staking nodes. We keep it for backwards-compatibility with LH v7.0.0, and in case + // we want to reintroduce the distinction in future. + task_spawner.blocking_json_task(Priority::P1, move || Ok(())) + }); let database_path = warp::path("lighthouse").and(warp::path("database")); @@ -4741,6 +4648,9 @@ pub fn serve( api_types::EventTopic::BlobSidecar => { event_handler.subscribe_blob_sidecar() } + api_types::EventTopic::DataColumnSidecar => { + event_handler.subscribe_data_column_sidecar() + } api_types::EventTopic::Attestation => { event_handler.subscribe_attestation() } @@ -4902,12 +4812,12 @@ pub fn serve( .uor(get_beacon_pool_proposer_slashings) .uor(get_beacon_pool_voluntary_exits) .uor(get_beacon_pool_bls_to_execution_changes) - .uor(get_beacon_deposit_snapshot) .uor(get_beacon_rewards_blocks) .uor(get_config_fork_schedule) .uor(get_config_spec) .uor(get_config_deposit_contract) .uor(get_debug_beacon_states) + .uor(get_debug_data_column_sidecars) .uor(get_debug_beacon_heads) .uor(get_debug_fork_choice) .uor(get_node_identity) @@ -4933,9 +4843,6 @@ pub fn serve( .uor(get_lighthouse_proto_array) .uor(get_lighthouse_validator_inclusion_global) .uor(get_lighthouse_validator_inclusion) - .uor(get_lighthouse_eth1_syncing) - .uor(get_lighthouse_eth1_block_cache) - .uor(get_lighthouse_eth1_deposit_cache) .uor(get_lighthouse_staking) .uor(get_lighthouse_database_info) .uor(get_lighthouse_block_rewards) @@ -4966,7 +4873,6 @@ pub fn serve( .uor(post_beacon_blinded_blocks) .uor(post_beacon_blocks_v2) .uor(post_beacon_blinded_blocks_v2) - .uor(post_beacon_pool_attestations_v1) .uor(post_beacon_pool_attestations_v2) .uor(post_beacon_pool_attester_slashings) .uor(post_beacon_pool_proposer_slashings) @@ -4975,6 +4881,7 @@ pub fn serve( .uor(post_beacon_pool_bls_to_execution_changes) .uor(post_beacon_state_validators) .uor(post_beacon_state_validator_balances) + .uor(post_beacon_state_validator_identities) .uor(post_beacon_rewards_attestations) 
.uor(post_beacon_rewards_sync_committee) .uor(post_validator_duties_attester) diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 971571f487..44286736f3 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -227,7 +227,7 @@ fn compute_historic_proposer_duties( } let indices = state - .get_beacon_proposer_indices(&chain.spec) + .get_beacon_proposer_indices(epoch, &chain.spec) .map_err(BeaconChainError::from) .map_err(warp_utils::reject::unhandled_error)?; diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index db85b8f205..a4fcb27b1d 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -36,24 +36,19 @@ //! attestations and there's no immediate cause for concern. use crate::task_spawner::{Priority, TaskSpawner}; use beacon_chain::{ - single_attestation::single_attestation_to_attestation, validator_monitor::timestamp_now, - AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes, + validator_monitor::timestamp_now, AttestationError, BeaconChain, BeaconChainError, + BeaconChainTypes, }; use beacon_processor::work_reprocessing_queue::{QueuedUnaggregate, ReprocessQueueMessage}; -use either::Either; +use beacon_processor::{Work, WorkEvent}; use eth2::types::Failure; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use serde_json::Value; -use std::borrow::Cow; use std::sync::Arc; use std::time::Duration; -use tokio::sync::{ - mpsc::{Sender, UnboundedSender}, - oneshot, -}; +use tokio::sync::{mpsc::UnboundedSender, oneshot}; use tracing::{debug, error, warn}; -use types::{Attestation, EthSpec, ForkName, SingleAttestation}; +use types::SingleAttestation; // Error variants are only used in `Debug` and considered `dead_code` by the compiler. 
#[derive(Debug)] @@ -65,8 +60,6 @@ pub enum Error { ReprocessDisabled, ReprocessFull, ReprocessTimeout, - InvalidJson(#[allow(dead_code)] serde_json::Error), - FailedConversion(#[allow(dead_code)] Box), } enum PublishAttestationResult { @@ -76,66 +69,24 @@ enum PublishAttestationResult { Failure(Error), } -#[allow(clippy::type_complexity)] -pub fn deserialize_attestation_payload( - payload: Value, - fork_name: Option, -) -> Result, SingleAttestation>>, Error> { - if fork_name.is_some_and(|fork_name| fork_name.electra_enabled()) || fork_name.is_none() { - if fork_name.is_none() { - warn!("No Consensus Version header specified."); - } - - Ok(serde_json::from_value::>(payload) - .map_err(Error::InvalidJson)? - .into_iter() - .map(Either::Right) - .collect()) - } else { - Ok( - serde_json::from_value::>>(payload) - .map_err(Error::InvalidJson)? - .into_iter() - .map(Either::Left) - .collect(), - ) - } -} - fn verify_and_publish_attestation( chain: &Arc>, - either_attestation: &Either, SingleAttestation>, + attestation: &SingleAttestation, seen_timestamp: Duration, network_tx: &UnboundedSender>, ) -> Result<(), Error> { - let attestation = convert_to_attestation(chain, either_attestation)?; let verified_attestation = chain - .verify_unaggregated_attestation_for_gossip(&attestation, None) + .verify_unaggregated_attestation_for_gossip(attestation, None) .map_err(Error::Validation)?; - match either_attestation { - Either::Left(attestation) => { - // Publish. 
- network_tx - .send(NetworkMessage::Publish { - messages: vec![PubsubMessage::Attestation(Box::new(( - verified_attestation.subnet_id(), - attestation.clone(), - )))], - }) - .map_err(|_| Error::Publication)?; - } - Either::Right(single_attestation) => { - network_tx - .send(NetworkMessage::Publish { - messages: vec![PubsubMessage::SingleAttestation(Box::new(( - verified_attestation.subnet_id(), - single_attestation.clone(), - )))], - }) - .map_err(|_| Error::Publication)?; - } - } + network_tx + .send(NetworkMessage::Publish { + messages: vec![PubsubMessage::Attestation(Box::new(( + verified_attestation.subnet_id(), + attestation.clone(), + )))], + }) + .map_err(|_| Error::Publication)?; // Notify the validator monitor. chain @@ -172,73 +123,24 @@ fn verify_and_publish_attestation( } } -fn convert_to_attestation<'a, T: BeaconChainTypes>( - chain: &Arc>, - attestation: &'a Either, SingleAttestation>, -) -> Result>, Error> { - match attestation { - Either::Left(a) => Ok(Cow::Borrowed(a)), - Either::Right(single_attestation) => { - let conversion_result = chain.with_committee_cache( - single_attestation.data.target.root, - single_attestation - .data - .slot - .epoch(T::EthSpec::slots_per_epoch()), - |committee_cache, _| { - let Some(committee) = committee_cache.get_beacon_committee( - single_attestation.data.slot, - single_attestation.committee_index, - ) else { - return Ok(Err(AttestationError::NoCommitteeForSlotAndIndex { - slot: single_attestation.data.slot, - index: single_attestation.committee_index, - })); - }; - - Ok(single_attestation_to_attestation::( - single_attestation, - committee.committee, - ) - .map(Cow::Owned)) - }, - ); - match conversion_result { - Ok(Ok(attestation)) => Ok(attestation), - Ok(Err(e)) => Err(Error::Validation(e)), - // Map the error returned by `with_committee_cache` for unknown blocks into the - // `UnknownHeadBlock` error that is gracefully handled. 
- Err(BeaconChainError::MissingBeaconBlock(beacon_block_root)) => { - Err(Error::Validation(AttestationError::UnknownHeadBlock { - beacon_block_root, - })) - } - Err(e) => Err(Error::FailedConversion(Box::new(e))), - } - } - } -} - pub async fn publish_attestations( task_spawner: TaskSpawner, chain: Arc>, - attestations: Vec, SingleAttestation>>, + attestations: Vec, network_tx: UnboundedSender>, - reprocess_send: Option>, + allow_reprocess: bool, ) -> Result<(), warp::Rejection> { // Collect metadata about attestations which we'll use to report failures. We need to // move the `attestations` vec into the blocking task, so this small overhead is unavoidable. let attestation_metadata = attestations .iter() - .map(|att| match att { - Either::Left(att) => (att.data().slot, att.committee_index()), - Either::Right(att) => (att.data.slot, Some(att.committee_index)), - }) + .map(|att| (att.data.slot, Some(att.committee_index))) .collect::>(); // Gossip validate and publish attestations that can be immediately processed. let seen_timestamp = timestamp_now(); let mut prelim_results = task_spawner + .clone() .blocking_task(Priority::P0, move || { Ok(attestations .into_iter() @@ -253,7 +155,7 @@ pub async fn publish_attestations( Err(Error::Validation(AttestationError::UnknownHeadBlock { beacon_block_root, })) => { - let Some(reprocess_tx) = &reprocess_send else { + if !allow_reprocess { return PublishAttestationResult::Failure(Error::ReprocessDisabled); }; // Re-process. 
@@ -277,7 +179,13 @@ pub async fn publish_attestations( beacon_block_root, process_fn: Box::new(reprocess_fn), }); - if reprocess_tx.try_send(reprocess_msg).is_err() { + if task_spawner + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(reprocess_msg), + }) + .is_err() + { PublishAttestationResult::Failure(Error::ReprocessFull) } else { PublishAttestationResult::Reprocessing(rx) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 9b1a3f8677..75979bbb1d 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -138,8 +138,7 @@ pub async fn publish_block>( spawn_build_data_sidecar_task(chain.clone(), block.clone(), unverified_blobs)?; // Gossip verify the block and blobs/data columns separately. - let gossip_verified_block_result = unverified_block - .into_gossip_verified_block(&chain, network_globals.custody_columns_count() as usize); + let gossip_verified_block_result = unverified_block.into_gossip_verified_block(&chain); let block_root = block_root.unwrap_or_else(|| { gossip_verified_block_result.as_ref().map_or_else( |_| block.canonical_root(), @@ -224,7 +223,7 @@ pub async fn publish_block>( publish_column_sidecars(network_tx, &gossip_verified_columns, &chain).map_err(|_| { warp_utils::reject::custom_server_error("unable to publish data column sidecars".into()) })?; - let sampling_columns_indices = &network_globals.sampling_columns; + let sampling_columns_indices = &network_globals.sampling_columns(); let sampling_columns = gossip_verified_columns .into_iter() .flatten() @@ -303,11 +302,7 @@ pub async fn publish_block>( ); let import_result = Box::pin(chain.process_block( block_root, - RpcBlock::new_without_blobs( - Some(block_root), - block.clone(), - network_globals.custody_columns_count() as usize, - ), + RpcBlock::new_without_blobs(Some(block_root), block.clone()), NotifyExecutionLayer::Yes, BlockImportSource::HttpApi, publish_fn, 
@@ -424,6 +419,14 @@ fn build_gossip_verified_data_columns( ); Ok(None) } + Err(GossipDataColumnError::PriorKnownUnpublished) => { + debug!( + column_index, + %slot, + "Data column for publication already known via the EL" + ); + Ok(None) + } Err(e) => { error!( column_index, diff --git a/beacon_node/http_api/src/task_spawner.rs b/beacon_node/http_api/src/task_spawner.rs index a679b294f6..834cd29971 100644 --- a/beacon_node/http_api/src/task_spawner.rs +++ b/beacon_node/http_api/src/task_spawner.rs @@ -30,6 +30,7 @@ impl Priority { } /// Spawns tasks on the `BeaconProcessor` or directly on the tokio executor. +#[derive(Clone)] pub struct TaskSpawner { /// Used to send tasks to the `BeaconProcessor`. The tokio executor will be /// used if this is `None`. @@ -155,6 +156,32 @@ impl TaskSpawner { .and_then(|x| x) } } + + pub fn try_send(&self, work_event: WorkEvent) -> Result<(), warp::Rejection> { + if let Some(beacon_processor_send) = &self.beacon_processor_send { + let error_message = match beacon_processor_send.try_send(work_event) { + Ok(()) => None, + Err(TrySendError::Full(_)) => { + Some("The task was dropped. The server is overloaded.") + } + Err(TrySendError::Closed(_)) => { + Some("The task was dropped. The server is shutting down.") + } + }; + + if let Some(error_message) = error_message { + return Err(warp_utils::reject::custom_server_error( + error_message.to_string(), + )); + }; + + Ok(()) + } else { + Err(warp_utils::reject::custom_server_error( + "The beacon processor is unavailable".to_string(), + )) + } + } } /// Send a task to the beacon processor and await execution. 
diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index f78a361dad..a52df6c863 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -103,6 +103,13 @@ impl InteractiveTester { tokio::spawn(server); + // Override the default timeout to 2s to timeouts on CI, as CI seems to require longer + // to process. The 1s timeouts for other tasks have been working for a long time, so we'll + // keep it as it is, as it may help identify a performance regression. + let timeouts = Timeouts { + default: Duration::from_secs(2), + ..Timeouts::set_all(Duration::from_secs(1)) + }; let client = BeaconNodeHttpClient::new( SensitiveUrl::parse(&format!( "http://{}:{}", @@ -110,7 +117,7 @@ impl InteractiveTester { listening_socket.port() )) .unwrap(), - Timeouts::set_all(Duration::from_secs(1)), + timeouts, ); Self { @@ -188,8 +195,6 @@ pub async fn create_api_server_with_config( })); *network_globals.sync_state.write() = SyncState::Synced; - let eth1_service = eth1::Service::new(eth1::Config::default(), chain.spec.clone()).unwrap(); - let beacon_processor_config = BeaconProcessorConfig { // The number of workers must be greater than one. 
Tests which use the // builder workflow sometimes require an internal HTTP request in order @@ -201,12 +206,9 @@ pub async fn create_api_server_with_config( let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx, } = BeaconProcessorChannels::new(&beacon_processor_config); let beacon_processor_send = beacon_processor_tx; - let reprocess_send = work_reprocessing_tx.clone(); BeaconProcessor { network_globals: network_globals.clone(), executor: test_runtime.task_executor.clone(), @@ -215,8 +217,6 @@ pub async fn create_api_server_with_config( } .spawn_manager( beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx, None, chain.slot_clock.clone(), chain.spec.maximum_gossip_clock_disparity(), @@ -241,8 +241,6 @@ pub async fn create_api_server_with_config( network_senders: Some(network_senders), network_globals: Some(network_globals), beacon_processor_send: Some(beacon_processor_send), - beacon_processor_reprocess_send: Some(reprocess_send), - eth1_service: Some(eth1_service), sse_logging_components: None, }); diff --git a/beacon_node/http_api/src/validators.rs b/beacon_node/http_api/src/validators.rs index 90ddd1ee8f..755b540502 100644 --- a/beacon_node/http_api/src/validators.rs +++ b/beacon_node/http_api/src/validators.rs @@ -2,7 +2,7 @@ use crate::state_id::StateId; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::types::{ self as api_types, ExecutionOptimisticFinalizedResponse, ValidatorBalanceData, ValidatorData, - ValidatorId, ValidatorStatus, + ValidatorId, ValidatorIdentityData, ValidatorStatus, }; use std::{collections::HashSet, sync::Arc}; @@ -119,3 +119,51 @@ pub fn get_beacon_state_validator_balances( finalized: Some(finalized), }) } + +pub fn get_beacon_state_validator_identities( + state_id: StateId, + chain: Arc>, + optional_ids: Option<&[ValidatorId]>, +) -> Result>, warp::Rejection> { + let (data, execution_optimistic, finalized) = state_id + 
.map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let ids_filter_set: Option> = match optional_ids { + // Same logic as validator_balances endpoint above + Some([]) => None, + Some(ids) => Some(HashSet::from_iter(ids.iter())), + None => None, + }; + + Ok(( + // From the BeaconState, extract the Validator data and convert it into ValidatorIdentityData type + state + .validators() + .iter() + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, validator)| { + ids_filter_set.as_ref().is_none_or(|ids_set| { + ids_set.contains(&ValidatorId::PublicKey(validator.pubkey)) + || ids_set.contains(&ValidatorId::Index(*index as u64)) + }) + }) + .map(|(index, validator)| ValidatorIdentityData { + index: index as u64, + pubkey: validator.pubkey, + activation_epoch: validator.activation_epoch, + }) + .collect::>(), + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) +} diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index cd590580be..28b81c2bda 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -39,9 +39,6 @@ type E = MainnetEthSpec; * */ -// Default custody group count for tests -const CGC: usize = 8; - /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=gossip`. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn gossip_invalid() { @@ -320,7 +317,7 @@ pub async fn consensus_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0x253405be9aa159bce7b276b8e1d3849c743e673118dfafe8c7d07c203ae0d80d }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective, but nonetheless equivocates, is accepted when using `broadcast_validation=consensus`. @@ -367,9 +364,9 @@ pub async fn consensus_partial_pass_only_consensus() { ); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_b = block_b.into_gossip_verified_block(&tester.harness.chain, CGC); + let gossip_block_b = block_b.into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_b.is_ok()); - let gossip_block_a = block_a.into_gossip_verified_block(&tester.harness.chain, CGC); + let gossip_block_a = block_a.into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_a.is_err()); /* submit `block_b` which should induce equivocation */ @@ -607,7 +604,7 @@ pub async fn equivocation_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); + 
assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0x253405be9aa159bce7b276b8e1d3849c743e673118dfafe8c7d07c203ae0d80d }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective but @@ -657,10 +654,10 @@ pub async fn equivocation_consensus_late_equivocation() { ); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_b = block_b.into_gossip_verified_block(&tester.harness.chain, CGC); + let gossip_block_b = block_b.into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_b.is_ok()); - let gossip_block_a = block_a.into_gossip_verified_block(&tester.harness.chain, CGC); + let gossip_block_a = block_a.into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); @@ -1005,7 +1002,7 @@ pub async fn blinded_consensus_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0x253405be9aa159bce7b276b8e1d3849c743e673118dfafe8c7d07c203ae0d80d }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. 
@@ -1215,7 +1212,7 @@ pub async fn blinded_equivocation_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0x253405be9aa159bce7b276b8e1d3849c743e673118dfafe8c7d07c203ae0d80d }".to_string()); } /// This test checks that a block that is valid from both a gossip and @@ -1294,9 +1291,9 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { ProvenancedBlock::Builder(b, _, _) => b, }; - let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain, CGC); + let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain); assert!(gossip_block_b.is_ok()); - let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain, CGC); + let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain); assert!(gossip_block_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); @@ -1398,7 +1395,7 @@ pub async fn block_seen_on_gossip_without_blobs() { // Simulate the block being seen on gossip. block .clone() - .into_gossip_verified_block(&tester.harness.chain, CGC) + .into_gossip_verified_block(&tester.harness.chain) .unwrap(); // It should not yet be added to fork choice because blobs have not been seen. @@ -1467,7 +1464,7 @@ pub async fn block_seen_on_gossip_with_some_blobs() { // Simulate the block being seen on gossip. 
block .clone() - .into_gossip_verified_block(&tester.harness.chain, CGC) + .into_gossip_verified_block(&tester.harness.chain) .unwrap(); // Simulate some of the blobs being seen on gossip. @@ -1786,6 +1783,5 @@ fn get_custody_columns(tester: &InteractiveTester) -> HashSet { .network_globals .as_ref() .unwrap() - .sampling_columns - .clone() + .sampling_columns() } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 10e1d01536..dcc6d13ec4 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -149,10 +149,41 @@ async fn attestations_across_fork_with_skip_slots() { .flat_map(|(atts, _)| atts.iter().map(|(att, _)| att.clone())) .collect::>(); + let unaggregated_attestations = unaggregated_attestations + .into_iter() + .map(|attn| { + let aggregation_bits = attn.get_aggregation_bits(); + + if aggregation_bits.len() != 1 { + panic!("Must be an unaggregated attestation") + } + + let aggregation_bit = *aggregation_bits.first().unwrap(); + + let committee = fork_state + .get_beacon_committee(attn.data().slot, attn.committee_index().unwrap()) + .unwrap(); + + let attester_index = committee + .committee + .iter() + .enumerate() + .find_map(|(i, &index)| { + if aggregation_bit as usize == i { + return Some(index); + } + None + }) + .unwrap(); + attn.to_single_attestation_with_attester_index(attester_index as u64) + .unwrap() + }) + .collect::>(); + assert!(!unaggregated_attestations.is_empty()); let fork_name = harness.spec.fork_name_at_slot::(fork_slot); client - .post_beacon_pool_attestations_v1(&unaggregated_attestations) + .post_beacon_pool_attestations_v2::(unaggregated_attestations, fork_name) .await .unwrap(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 4f3cd6c828..1a31f1398a 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -4,8 
+4,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy}, ChainConfig, }; -use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; -use either::Either; +use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, Work, WorkEvent}; use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; @@ -539,7 +538,7 @@ pub async fn proposer_boost_re_org_test( slot_a, num_parent_votes, ); - harness.process_attestations(block_a_parent_votes); + harness.process_attestations(block_a_parent_votes, &state_a); // Attest to block A during slot B. for _ in 0..parent_distance { @@ -553,7 +552,7 @@ pub async fn proposer_boost_re_org_test( slot_b, num_empty_votes, ); - harness.process_attestations(block_a_empty_votes); + harness.process_attestations(block_a_empty_votes, &state_a); let remaining_attesters = all_validators .iter() @@ -586,7 +585,7 @@ pub async fn proposer_boost_re_org_test( slot_b, num_head_votes, ); - harness.process_attestations(block_b_head_votes); + harness.process_attestations(block_b_head_votes, &state_b); let payload_lookahead = harness.chain.config.prepare_payload_lookahead; let fork_choice_lookahead = Duration::from_millis(500); @@ -818,10 +817,10 @@ pub async fn fork_choice_before_proposal() { block_root_c, slot_c, ); - harness.process_attestations(attestations_c); + harness.process_attestations(attestations_c, &state_c); // Apply the attestations to B, but don't re-run fork choice. - harness.process_attestations(attestations_b); + harness.process_attestations(attestations_b, &state_b); // Due to proposer boost, the head should be C during slot C. assert_eq!( @@ -894,7 +893,7 @@ async fn queue_attestations_from_http() { let fork_name = tester.harness.spec.fork_name_at_slot::(attestation_slot); // Make attestations to the block and POST them to the beacon node on a background thread. 
- let attestation_future = if fork_name.electra_enabled() { + let attestation_future = { let single_attestations = harness .make_single_attestations( &all_validators, @@ -907,30 +906,9 @@ async fn queue_attestations_from_http() { .flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att)) .collect::>(); - let attestations = Either::Right(single_attestations); - tokio::spawn(async move { client - .post_beacon_pool_attestations_v2::(attestations, fork_name) - .await - .expect("attestations should be processed successfully") - }) - } else { - let attestations = harness - .make_unaggregated_attestations( - &all_validators, - &post_state, - block.0.state_root(), - block_root.into(), - attestation_slot, - ) - .into_iter() - .flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att)) - .collect::>(); - - tokio::spawn(async move { - client - .post_beacon_pool_attestations_v1(&attestations) + .post_beacon_pool_attestations_v2::(single_attestations, fork_name) .await .expect("attestations should be processed successfully") }) @@ -945,14 +923,16 @@ async fn queue_attestations_from_http() { .unwrap(); tester .ctx - .beacon_processor_reprocess_send + .beacon_processor_send .as_ref() .unwrap() - .send(ReprocessQueueMessage::BlockImported { - block_root, - parent_root, + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(ReprocessQueueMessage::BlockImported { + block_root, + parent_root, + }), }) - .await .unwrap(); attestation_future.await.unwrap(); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index a5a21fd985..955b44c36c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3,7 +3,6 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; -use either::Either; use eth2::{ mixin::{RequestAccept, ResponseForkName, 
ResponseOptional}, reqwest::RequestBuilder, @@ -138,7 +137,7 @@ impl ApiTester { .deterministic_keypairs(VALIDATOR_COUNT) .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer_with_config() + .mock_execution_layer() .build(); harness @@ -965,6 +964,87 @@ impl ApiTester { self } + pub async fn test_beacon_states_validator_identities(self) -> Self { + for state_id in self.interesting_state_ids() { + for validator_indices in self.interesting_validator_indices() { + let state_opt = state_id.state(&self.chain).ok(); + let validators: Vec = match state_opt.as_ref() { + Some((state, _execution_optimistic, _finalized)) => { + state.validators().clone().to_vec() + } + None => vec![], + }; + + let validator_index_ids = validator_indices + .iter() + .cloned() + .map(ValidatorId::Index) + .collect::>(); + + let validator_pubkey_ids = validator_indices + .iter() + .cloned() + .map(|i| { + ValidatorId::PublicKey( + validators + .get(i as usize) + .map_or(PublicKeyBytes::empty(), |val| val.pubkey), + ) + }) + .collect::>(); + + let result_index_ids = self + .client + .post_beacon_states_validator_identities(state_id.0, validator_index_ids) + .await + .unwrap() + .map(|res| res.data); + let result_pubkey_ids = self + .client + .post_beacon_states_validator_identities(state_id.0, validator_pubkey_ids) + .await + .unwrap() + .map(|res| res.data); + + let expected = state_opt.map(|(state, _execution_optimistic, _finalized)| { + // If validator_indices is empty, return identities for all validators + if validator_indices.is_empty() { + state + .validators() + .iter() + .enumerate() + .map(|(index, validator)| ValidatorIdentityData { + index: index as u64, + pubkey: validator.pubkey, + activation_epoch: validator.activation_epoch, + }) + .collect() + } else { + let mut validators = Vec::with_capacity(validator_indices.len()); + + for i in validator_indices { + if i < state.validators().len() as u64 { + // access each validator, and then 
transform the data into ValidatorIdentityData + let validator = state.validators().get(i as usize).unwrap(); + validators.push(ValidatorIdentityData { + index: i, + pubkey: validator.pubkey, + activation_epoch: validator.activation_epoch, + }); + } + } + + validators + } + }); + + assert_eq!(result_index_ids, expected, "{:?}", state_id); + assert_eq!(result_pubkey_ids, expected, "{:?}", state_id); + } + } + self + } + pub async fn test_beacon_states_validators(self) -> Self { for state_id in self.interesting_state_ids() { for statuses in self.interesting_validator_statuses() { @@ -1907,18 +1987,46 @@ impl ApiTester { } pub async fn test_post_beacon_pool_attestations_valid(mut self) -> Self { - self.client - .post_beacon_pool_attestations_v1(self.attestations.as_slice()) - .await - .unwrap(); - let fork_name = self .attestations .first() .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) .unwrap(); - let attestations = Either::Left(self.attestations.clone()); + let state = &self.chain.head_snapshot().beacon_state; + + let attestations = self + .attestations + .clone() + .into_iter() + .map(|attn| { + let aggregation_bits = attn.get_aggregation_bits(); + + if aggregation_bits.len() != 1 { + panic!("Must be an unaggregated attestation") + } + + let aggregation_bit = *aggregation_bits.first().unwrap(); + + let committee = state + .get_beacon_committee(attn.data().slot, attn.committee_index().unwrap()) + .unwrap(); + + let attester_index = committee + .committee + .iter() + .enumerate() + .find_map(|(i, &index)| { + if aggregation_bit as usize == i { + return Some(index); + } + None + }) + .unwrap(); + attn.to_single_attestation_with_attester_index(attester_index as u64) + .unwrap() + }) + .collect::>(); self.client .post_beacon_pool_attestations_v2::(attestations, fork_name) @@ -1943,9 +2051,8 @@ impl ApiTester { .map(|att| self.chain.spec.fork_name_at_slot::(att.data.slot)) .unwrap(); - let attestations = Either::Right(self.single_attestations.clone()); 
self.client - .post_beacon_pool_attestations_v2::(attestations, fork_name) + .post_beacon_pool_attestations_v2::(self.single_attestations.clone(), fork_name) .await .unwrap(); assert!( @@ -1958,18 +2065,87 @@ impl ApiTester { pub async fn test_post_beacon_pool_attestations_invalid_v1(mut self) -> Self { let mut attestations = Vec::new(); + let state = &self.chain.head_snapshot().beacon_state; for attestation in &self.attestations { let mut invalid_attestation = attestation.clone(); invalid_attestation.data_mut().slot += 1; + // Convert valid attestation into valid `SingleAttestation` + let aggregation_bits = attestation.get_aggregation_bits(); + + if aggregation_bits.len() != 1 { + panic!("Must be an unaggregated attestation") + } + + let aggregation_bit = *aggregation_bits.first().unwrap(); + + let committee = state + .get_beacon_committee( + attestation.data().slot, + attestation.committee_index().unwrap(), + ) + .unwrap(); + + let attester_index = committee + .committee + .iter() + .enumerate() + .find_map(|(i, &index)| { + if aggregation_bit as usize == i { + return Some(index); + } + None + }) + .unwrap(); + let attestation = attestation + .to_single_attestation_with_attester_index(attester_index as u64) + .unwrap(); + + // Convert invalid attestation to invalid `SingleAttestation` + let aggregation_bits = invalid_attestation.get_aggregation_bits(); + + if aggregation_bits.len() != 1 { + panic!("Must be an unaggregated attestation") + } + + let aggregation_bit = *aggregation_bits.first().unwrap(); + + let committee = state + .get_beacon_committee( + invalid_attestation.data().slot, + invalid_attestation.committee_index().unwrap(), + ) + .unwrap(); + + let attester_index = committee + .committee + .iter() + .enumerate() + .find_map(|(i, &index)| { + if aggregation_bit as usize == i { + return Some(index); + } + None + }) + .unwrap(); + let invalid_attestation = invalid_attestation + .to_single_attestation_with_attester_index(attester_index as u64) + .unwrap(); 
+ // add both to ensure we only fail on invalid attestations attestations.push(attestation.clone()); attestations.push(invalid_attestation); } + let fork_name = self + .attestations + .first() + .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) + .unwrap(); + let err = self .client - .post_beacon_pool_attestations_v1(attestations.as_slice()) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap_err(); @@ -2011,7 +2187,6 @@ impl ApiTester { .first() .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) .unwrap(); - let attestations = Either::Right(attestations); let err_v2 = self .client .post_beacon_pool_attestations_v2::(attestations, fork_name) @@ -4177,9 +4352,47 @@ impl ApiTester { assert_eq!(result, expected); + let attestations = self + .attestations + .clone() + .into_iter() + .map(|attn| { + let aggregation_bits = attn.get_aggregation_bits(); + + if aggregation_bits.len() != 1 { + panic!("Must be an unaggregated attestation") + } + + let aggregation_bit = *aggregation_bits.first().unwrap(); + + let committee = head_state + .get_beacon_committee(attn.data().slot, attn.committee_index().unwrap()) + .unwrap(); + + let attester_index = committee + .committee + .iter() + .enumerate() + .find_map(|(i, &index)| { + if aggregation_bit as usize == i { + return Some(index); + } + None + }) + .unwrap(); + attn.to_single_attestation_with_attester_index(attester_index as u64) + .unwrap() + }) + .collect::>(); + + let fork_name = self + .chain + .spec + .fork_name_at_slot::(attestations.first().unwrap().data.slot); + // Attest to the current slot self.client - .post_beacon_pool_attestations_v1(self.attestations.as_slice()) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap(); @@ -5823,40 +6036,6 @@ impl ApiTester { self } - pub async fn test_get_lighthouse_eth1_syncing(self) -> Self { - self.client.get_lighthouse_eth1_syncing().await.unwrap(); - - self - } - - pub async fn 
test_get_lighthouse_eth1_block_cache(self) -> Self { - let blocks = self.client.get_lighthouse_eth1_block_cache().await.unwrap(); - - assert!(blocks.data.is_empty()); - - self - } - - pub async fn test_get_lighthouse_eth1_deposit_cache(self) -> Self { - let deposits = self - .client - .get_lighthouse_eth1_deposit_cache() - .await - .unwrap(); - - assert!(deposits.data.is_empty()); - - self - } - - pub async fn test_get_lighthouse_staking(self) -> Self { - let result = self.client.get_lighthouse_staking().await.unwrap(); - - assert_eq!(result, self.chain.eth1_chain.is_some()); - - self - } - pub async fn test_post_lighthouse_database_reconstruct(self) -> Self { let response = self .client @@ -5916,9 +6095,47 @@ impl ApiTester { assert_eq!(result, expected); + let attestations = self + .attestations + .clone() + .into_iter() + .map(|attn| { + let aggregation_bits = attn.get_aggregation_bits(); + + if aggregation_bits.len() != 1 { + panic!("Must be an unaggregated attestation") + } + + let aggregation_bit = *aggregation_bits.first().unwrap(); + + let committee = head_state + .get_beacon_committee(attn.data().slot, attn.committee_index().unwrap()) + .unwrap(); + + let attester_index = committee + .committee + .iter() + .enumerate() + .find_map(|(i, &index)| { + if aggregation_bit as usize == i { + return Some(index); + } + None + }) + .unwrap(); + attn.to_single_attestation_with_attester_index(attester_index as u64) + .unwrap() + }) + .collect::>(); + + let fork_name = self + .chain + .spec + .fork_name_at_slot::(attestations.first().unwrap().data.slot); + // Attest to the current slot self.client - .post_beacon_pool_attestations_v1(self.attestations.as_slice()) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap(); @@ -5973,8 +6190,47 @@ impl ApiTester { let expected_attestation_len = self.attestations.len(); + let state = self.harness.get_current_state(); + let attestations = self + .attestations + .clone() + .into_iter() + .map(|attn| { + 
let aggregation_bits = attn.get_aggregation_bits(); + + if aggregation_bits.len() != 1 { + panic!("Must be an unaggregated attestation") + } + + let aggregation_bit = *aggregation_bits.first().unwrap(); + + let committee = state + .get_beacon_committee(attn.data().slot, attn.committee_index().unwrap()) + .unwrap(); + + let attester_index = committee + .committee + .iter() + .enumerate() + .find_map(|(i, &index)| { + if aggregation_bit as usize == i { + return Some(index); + } + None + }) + .unwrap(); + attn.to_single_attestation_with_attester_index(attester_index as u64) + .unwrap() + }) + .collect::>(); + + let fork_name = self + .chain + .spec + .fork_name_at_slot::(attestations.first().unwrap().data.slot); + self.client - .post_beacon_pool_attestations_v1(self.attestations.as_slice()) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap(); @@ -6247,9 +6503,9 @@ impl ApiTester { .chain .spec .fork_name_at_slot::(self.chain.slot().unwrap()); - let attestations = Either::Right(self.single_attestations.clone()); + self.client - .post_beacon_pool_attestations_v2::(attestations, fork_name) + .post_beacon_pool_attestations_v2::(self.single_attestations.clone(), fork_name) .await .unwrap(); @@ -6510,6 +6766,8 @@ async fn beacon_get_state_info() { .await .test_beacon_states_validator_balances() .await + .test_beacon_states_validator_identities() + .await .test_beacon_states_committees() .await .test_beacon_states_validator_id() @@ -7491,14 +7749,6 @@ async fn lighthouse_endpoints() { .await .test_get_lighthouse_validator_inclusion_global() .await - .test_get_lighthouse_eth1_syncing() - .await - .test_get_lighthouse_eth1_block_cache() - .await - .test_get_lighthouse_eth1_deposit_cache() - .await - .test_get_lighthouse_staking() - .await .test_post_lighthouse_database_reconstruct() .await .test_post_lighthouse_liveness() diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 
89d260569a..bd72a5d51a 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -139,6 +139,9 @@ pub struct Config { /// Configuration for the minimum message size for which IDONTWANT messages are send in the mesh. /// Lower the value reduces the optimization effect of the IDONTWANT messages. pub idontwant_message_size_threshold: usize, + + /// Flag for advertising a fake CGC to peers for testing ONLY. + pub advertise_false_custody_group_count: Option, } impl Config { @@ -363,6 +366,7 @@ impl Default for Config { invalid_block_storage: None, inbound_rate_limiter_config: None, idontwant_message_size_threshold: DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD, + advertise_false_custody_group_count: None, } } } diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index e70c8047e0..5628d5c463 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -259,11 +259,14 @@ pub fn build_enr( // only set `cgc` if PeerDAS fork epoch has been scheduled if spec.is_peer_das_scheduled() { - let custody_group_count = if config.subscribe_all_data_column_subnets { - spec.number_of_custody_groups - } else { - spec.custody_requirement - }; + let custody_group_count = + if let Some(false_cgc) = config.advertise_false_custody_group_count { + false_cgc + } else if config.subscribe_all_data_column_subnets { + spec.number_of_custody_groups + } else { + spec.custody_requirement + }; builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count); } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index ad54c6b8b1..ad4241c5b7 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -49,6 +49,7 @@ use tracing::{debug, error, info, trace, warn}; use types::{ChainSpec, 
EnrForkId, EthSpec}; mod subnet_predicate; +use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY; pub use subnet_predicate::subnet_predicate; use types::non_zero_usize::new_non_zero_usize; @@ -476,6 +477,15 @@ impl Discovery { Ok(()) } + pub fn update_enr_cgc(&mut self, custody_group_count: u64) -> Result<(), String> { + self.discv5 + .enr_insert(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count) + .map_err(|e| format!("{:?}", e))?; + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr()); + *self.network_globals.local_enr.write() = self.discv5.local_enr(); + Ok(()) + } + /// Adds/Removes a subnet from the ENR attnets/syncnets Bitfield pub fn update_enr_bitfield(&mut self, subnet: Subnet, value: bool) -> Result<(), String> { let local_enr = self.discv5.local_enr(); diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 1ad55ce5c4..a45b941e58 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -106,14 +106,14 @@ impl NetworkBehaviour for PeerManager { if let Some(enr) = self.peers_to_dial.pop() { self.inject_peer_connection(&enr.peer_id(), ConnectingType::Dialing, Some(enr.clone())); + let multiaddr_quic = if self.quic_enabled { + enr.multiaddr_quic() + } else { + vec![] + }; + // Prioritize Quic connections over Tcp ones. 
- let multiaddrs = [ - self.quic_enabled - .then_some(enr.multiaddr_quic()) - .unwrap_or_default(), - enr.multiaddr_tcp(), - ] - .concat(); + let multiaddrs = [multiaddr_quic, enr.multiaddr_tcp()].concat(); debug!(peer_id = %enr.peer_id(), ?multiaddrs, "Dialing peer"); return Poll::Ready(ToSwarm::Dial { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 95a4e82fa2..b28807c47e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -746,6 +746,7 @@ impl PeerDB { head_root: Hash256::ZERO, finalized_epoch: Epoch::new(0), finalized_root: Hash256::ZERO, + earliest_available_slot: Some(Slot::new(0)), }, }, ); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs index bab8aa9aeb..5a4fc33994 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs @@ -25,6 +25,7 @@ pub struct SyncInfo { pub head_root: Hash256, pub finalized_epoch: Epoch, pub finalized_root: Hash256, + pub earliest_available_slot: Option, } impl std::cmp::PartialEq for SyncStatus { diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index f24074118e..f638dd5615 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -67,7 +67,13 @@ impl SSZSnappyInboundCodec { ) -> Result<(), RPCError> { let bytes = match &item { RpcResponse::Success(resp) => match &resp { - RpcSuccessResponse::Status(res) => res.as_ssz_bytes(), + RpcSuccessResponse::Status(res) => match self.protocol.versioned_protocol { + SupportedProtocol::StatusV1 => res.status_v1().as_ssz_bytes(), + SupportedProtocol::StatusV2 => res.status_v2().as_ssz_bytes(), + _ => 
{ + unreachable!("We only send status responses on negotiating status protocol") + } + }, RpcSuccessResponse::BlocksByRange(res) => res.as_ssz_bytes(), RpcSuccessResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::BlobsByRange(res) => res.as_ssz_bytes(), @@ -329,7 +335,16 @@ impl Encoder> for SSZSnappyOutboundCodec { fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { - RequestType::Status(req) => req.as_ssz_bytes(), + RequestType::Status(req) => { + // Send the status message based on the negotiated protocol + match self.protocol.versioned_protocol { + SupportedProtocol::StatusV1 => req.status_v1().as_ssz_bytes(), + SupportedProtocol::StatusV2 => req.status_v2().as_ssz_bytes(), + _ => { + unreachable!("We only send status requests on negotiating status protocol") + } + } + } RequestType::Goodbye(req) => req.as_ssz_bytes(), RequestType::BlocksByRange(r) => match r { OldBlocksByRangeRequest::V1(req) => req.as_ssz_bytes(), @@ -553,9 +568,12 @@ fn handle_rpc_request( spec: &ChainSpec, ) -> Result>, RPCError> { match versioned_protocol { - SupportedProtocol::StatusV1 => Ok(Some(RequestType::Status( - StatusMessage::from_ssz_bytes(decoded_buffer)?, - ))), + SupportedProtocol::StatusV1 => Ok(Some(RequestType::Status(StatusMessage::V1( + StatusMessageV1::from_ssz_bytes(decoded_buffer)?, + )))), + SupportedProtocol::StatusV2 => Ok(Some(RequestType::Status(StatusMessage::V2( + StatusMessageV2::from_ssz_bytes(decoded_buffer)?, + )))), SupportedProtocol::GoodbyeV1 => Ok(Some(RequestType::Goodbye( GoodbyeReason::from_ssz_bytes(decoded_buffer)?, ))), @@ -666,9 +684,12 @@ fn handle_rpc_response( fork_name: Option, ) -> Result>, RPCError> { match versioned_protocol { - SupportedProtocol::StatusV1 => Ok(Some(RpcSuccessResponse::Status( - StatusMessage::from_ssz_bytes(decoded_buffer)?, - ))), + SupportedProtocol::StatusV1 => Ok(Some(RpcSuccessResponse::Status(StatusMessage::V1( + 
StatusMessageV1::from_ssz_bytes(decoded_buffer)?, + )))), + SupportedProtocol::StatusV2 => Ok(Some(RpcSuccessResponse::Status(StatusMessage::V2( + StatusMessageV2::from_ssz_bytes(decoded_buffer)?, + )))), // This case should be unreachable as `Goodbye` has no response. SupportedProtocol::GoodbyeV1 => Err(RPCError::InvalidData( "Goodbye RPC message has no valid response".to_string(), @@ -1036,14 +1057,25 @@ mod tests { SignedBeaconBlock::from_block(block, Signature::empty()) } - fn status_message() -> StatusMessage { - StatusMessage { + fn status_message_v1() -> StatusMessage { + StatusMessage::V1(StatusMessageV1 { fork_digest: [0; 4], finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), head_root: Hash256::zero(), head_slot: Slot::new(1), - } + }) + } + + fn status_message_v2() -> StatusMessage { + StatusMessage::V2(StatusMessageV2 { + fork_digest: [0; 4], + finalized_root: Hash256::zero(), + finalized_epoch: Epoch::new(1), + head_root: Hash256::zero(), + head_slot: Slot::new(1), + earliest_available_slot: Slot::new(0), + }) } fn bbrange_request_v1() -> OldBlocksByRangeRequest { @@ -1284,11 +1316,22 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::StatusV1, - RpcResponse::Success(RpcSuccessResponse::Status(status_message())), + RpcResponse::Success(RpcSuccessResponse::Status(status_message_v1())), ForkName::Base, &chain_spec, ), - Ok(Some(RpcSuccessResponse::Status(status_message()))) + Ok(Some(RpcSuccessResponse::Status(status_message_v1()))) + ); + + // A StatusV2 still encodes as a StatusV1 since version is Version::V1 + assert_eq!( + encode_then_decode_response( + SupportedProtocol::StatusV1, + RpcResponse::Success(RpcSuccessResponse::Status(status_message_v2())), + ForkName::Fulu, + &chain_spec, + ), + Ok(Some(RpcSuccessResponse::Status(status_message_v1()))) ); assert_eq!( @@ -1716,6 +1759,27 @@ mod tests { ), Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))) ); + + // A StatusV1 still encodes as a StatusV2 since 
version is Version::V2 + assert_eq!( + encode_then_decode_response( + SupportedProtocol::StatusV2, + RpcResponse::Success(RpcSuccessResponse::Status(status_message_v1())), + ForkName::Fulu, + &chain_spec, + ), + Ok(Some(RpcSuccessResponse::Status(status_message_v2()))) + ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::StatusV2, + RpcResponse::Success(RpcSuccessResponse::Status(status_message_v2())), + ForkName::Fulu, + &chain_spec, + ), + Ok(Some(RpcSuccessResponse::Status(status_message_v2()))) + ); } // Test RPCResponse encoding/decoding for V2 messages @@ -1901,7 +1965,8 @@ mod tests { let requests: &[RequestType] = &[ RequestType::Ping(ping_message()), - RequestType::Status(status_message()), + RequestType::Status(status_message_v1()), + RequestType::Status(status_message_v2()), RequestType::Goodbye(GoodbyeReason::Fault), RequestType::BlocksByRange(bbrange_request_v1()), RequestType::BlocksByRange(bbrange_request_v2()), @@ -1948,7 +2013,7 @@ mod tests { let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00"; // Status message is 84 bytes uncompressed. `max_compressed_len` is 32 + 84 + 84/6 = 130. - let status_message_bytes = StatusMessage { + let status_message_bytes = StatusMessageV1 { fork_digest: [0; 4], finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), @@ -2071,7 +2136,7 @@ mod tests { assert_eq!(stream_identifier.len(), 10); // Status message is 84 bytes uncompressed. `max_compressed_len` is 32 + 84 + 84/6 = 130. 
- let status_message_bytes = StatusMessage { + let status_message_bytes = StatusMessageV1 { fork_digest: [0; 4], finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 75d49e9cb5..7a746a63e1 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -1,11 +1,11 @@ +use super::{rate_limiter::Quota, Protocol}; +use std::num::NonZeroU64; use std::{ fmt::{Debug, Display}, str::FromStr, time::Duration, }; -use super::{rate_limiter::Quota, Protocol}; - use serde::{Deserialize, Serialize}; /// Auxiliary struct to aid on configuration parsing. @@ -100,24 +100,30 @@ pub struct RateLimiterConfig { } impl RateLimiterConfig { - pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(2, 10); - pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); - pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); + pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(NonZeroU64::new(2).unwrap(), 10); + pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(NonZeroU64::new(2).unwrap(), 5); + pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(NonZeroU64::new(5).unwrap(), 15); pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); // The number is chosen to balance between upload bandwidth required to serve // blocks and a decent syncing rate for honest nodes. Malicious nodes would need to // spread out their requests over the time window to max out bandwidth on the server. 
- pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(128, 10); - pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); // `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage - pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(896, 10); - pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(896, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(896).unwrap(), 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(896).unwrap(), 10); // 320 blocks worth of columns for regular node, or 40 blocks for supernode. // Range sync load balances when requesting blocks, and each batch is 32 blocks. - pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); + pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(5120).unwrap(), 10); // 512 columns per request from spec. This should be plenty as peers are unlikely to send all // sampling requests to a single peer. 
- pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); + pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(512).unwrap(), 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -275,7 +281,7 @@ mod tests { protocol: Protocol::Goodbye, quota: Quota { replenish_all_every: Duration::from_secs(10), - max_tokens: 8, + max_tokens: NonZeroU64::new(8).unwrap(), }, }; assert_eq!(quota.to_string().parse(), Ok(quota)) diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 33c5521c3b..396d390b00 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -28,7 +28,7 @@ use std::{ use tokio::time::{sleep, Sleep}; use tokio_util::time::{delay_queue, DelayQueue}; use tracing::{debug, trace}; -use types::{EthSpec, ForkContext}; +use types::{EthSpec, ForkContext, Slot}; /// The number of times to retry an outbound upgrade in the case of IO errors. 
const IO_ERROR_RETRIES: u8 = 3; @@ -377,7 +377,7 @@ where ConnectionHandlerEvent, > { if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { + if !waker.will_wake(cx.waker()) { self.waker = Some(cx.waker().clone()); } } else { @@ -932,9 +932,8 @@ where } } RequestType::BlobsByRange(request) => { - let max_requested_blobs = request - .count - .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork)); + let epoch = Slot::new(request.start_slot).epoch(E::slots_per_epoch()); + let max_requested_blobs = request.max_blobs_requested(epoch, spec); let max_allowed = spec.max_request_blob_sidecars(current_fork) as u64; if max_requested_blobs > max_allowed { self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9fe2fef9e8..74cfc6d198 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -16,11 +16,10 @@ use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, - DataColumnsByRootIdentifier, Epoch, EthSpec, Hash256, LightClientBootstrap, + DataColumnsByRootIdentifier, Epoch, EthSpec, ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; -use types::{ForkContext, ForkName}; /// Maximum length of error message. pub type MaxErrorLen = U256; @@ -64,7 +63,11 @@ impl Display for ErrorType { /* Requests */ /// The STATUS request/response handshake message. 
-#[derive(Encode, Decode, Clone, Debug, PartialEq)] +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq),) +)] +#[derive(Clone, Debug, PartialEq)] pub struct StatusMessage { /// The fork version of the chain we are broadcasting. pub fork_digest: [u8; 4], @@ -80,6 +83,43 @@ pub struct StatusMessage { /// The slot associated with the latest block root. pub head_slot: Slot, + + /// The slot after which we guarantee to have all the blocks + /// and blobs/data columns that we currently advertise. + #[superstruct(only(V2))] + pub earliest_available_slot: Slot, +} + +impl StatusMessage { + pub fn status_v1(&self) -> StatusMessageV1 { + match &self { + Self::V1(status) => status.clone(), + Self::V2(status) => StatusMessageV1 { + fork_digest: status.fork_digest, + finalized_root: status.finalized_root, + finalized_epoch: status.finalized_epoch, + head_root: status.head_root, + head_slot: status.head_slot, + }, + } + } + + pub fn status_v2(&self) -> StatusMessageV2 { + match &self { + Self::V1(status) => StatusMessageV2 { + fork_digest: status.fork_digest, + finalized_root: status.finalized_root, + finalized_epoch: status.finalized_epoch, + head_root: status.head_root, + head_slot: status.head_slot, + // Note: we always produce a V2 message as our local + // status message, so this match arm should ideally never + // be invoked in lighthouse. + earliest_available_slot: Slot::new(0), + }, + Self::V2(status) => status.clone(), + } + } } /// The PING request/response message. 
@@ -328,8 +368,8 @@ pub struct BlobsByRangeRequest { } impl BlobsByRangeRequest { - pub fn max_blobs_requested(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { - let max_blobs_per_block = spec.max_blobs_per_block_by_fork(current_fork); + pub fn max_blobs_requested(&self, epoch: Epoch, spec: &ChainSpec) -> u64 { + let max_blobs_per_block = spec.max_blobs_per_block(epoch); self.count.saturating_mul(max_blobs_per_block) } } @@ -727,7 +767,7 @@ impl std::fmt::Display for RpcErrorResponse { impl std::fmt::Display for StatusMessage { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Status Message: Fork Digest: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}", self.fork_digest, self.finalized_root, self.finalized_epoch, self.head_root, self.head_slot) + write!(f, "Status Message: Fork Digest: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}, Earliest available slot: {:?}", self.fork_digest(), self.finalized_root(), self.finalized_epoch(), self.head_root(), self.head_slot(), self.earliest_available_slot()) } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 8cb720132a..0619908bb6 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -21,9 +21,7 @@ use tracing::{debug, error, instrument, trace}; use types::{EthSpec, ForkContext}; pub(crate) use handler::{HandlerErr, HandlerEvent}; -pub(crate) use methods::{ - MetaData, MetaDataV1, MetaDataV2, MetaDataV3, Ping, RpcResponse, RpcSuccessResponse, -}; +pub(crate) use methods::{MetaData, MetaDataV2, MetaDataV3, Ping, RpcResponse, RpcSuccessResponse}; pub use protocol::RequestType; use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 820f50ac93..8f613dcbf9 100644 --- 
a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,7 +21,7 @@ use types::{ EmptyBlock, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, + MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -298,6 +298,7 @@ pub enum Encoding { #[derive(Debug, Clone, Copy, PartialEq)] pub enum SupportedProtocol { StatusV1, + StatusV2, GoodbyeV1, BlocksByRangeV1, BlocksByRangeV2, @@ -321,6 +322,7 @@ impl SupportedProtocol { pub fn version_string(&self) -> &'static str { match self { SupportedProtocol::StatusV1 => "1", + SupportedProtocol::StatusV2 => "2", SupportedProtocol::GoodbyeV1 => "1", SupportedProtocol::BlocksByRangeV1 => "1", SupportedProtocol::BlocksByRangeV2 => "2", @@ -344,6 +346,7 @@ impl SupportedProtocol { pub fn protocol(&self) -> Protocol { match self { SupportedProtocol::StatusV1 => Protocol::Status, + SupportedProtocol::StatusV2 => Protocol::Status, SupportedProtocol::GoodbyeV1 => Protocol::Goodbye, SupportedProtocol::BlocksByRangeV1 => Protocol::BlocksByRange, SupportedProtocol::BlocksByRangeV2 => Protocol::BlocksByRange, @@ -368,6 +371,7 @@ impl SupportedProtocol { fn currently_supported(fork_context: &ForkContext) -> Vec { let mut supported = vec![ + ProtocolId::new(Self::StatusV2, Encoding::SSZSnappy), ProtocolId::new(Self::StatusV1, Encoding::SSZSnappy), ProtocolId::new(Self::GoodbyeV1, Encoding::SSZSnappy), // V2 variants have higher preference then V1 @@ -492,8 +496,8 @@ impl ProtocolId { pub fn rpc_request_limits(&self, spec: &ChainSpec) -> RpcLimits { match self.versioned_protocol.protocol() { Protocol::Status => RpcLimits::new( - 
::ssz_fixed_len(), - ::ssz_fixed_len(), + ::ssz_fixed_len(), + ::ssz_fixed_len(), ), Protocol::Goodbye => RpcLimits::new( ::ssz_fixed_len(), @@ -537,8 +541,8 @@ impl ProtocolId { pub fn rpc_response_limits(&self, fork_context: &ForkContext) -> RpcLimits { match self.versioned_protocol.protocol() { Protocol::Status => RpcLimits::new( - ::ssz_fixed_len(), - ::ssz_fixed_len(), + ::ssz_fixed_len(), + ::ssz_fixed_len(), ), Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), @@ -589,6 +593,7 @@ impl ProtocolId { | SupportedProtocol::LightClientFinalityUpdateV1 | SupportedProtocol::LightClientUpdatesByRangeV1 => true, SupportedProtocol::StatusV1 + | SupportedProtocol::StatusV2 | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 | SupportedProtocol::PingV1 @@ -633,7 +638,8 @@ pub fn rpc_blob_limits() -> RpcLimits { pub fn rpc_data_column_limits(fork_name: ForkName, spec: &ChainSpec) -> RpcLimits { RpcLimits::new( DataColumnSidecar::::min_size(), - DataColumnSidecar::::max_size(spec.max_blobs_per_block_by_fork(fork_name) as usize), + // TODO(EIP-7892): fix this once we change fork-version on BPO forks + DataColumnSidecar::::max_size(spec.max_blobs_per_block_within_fork(fork_name) as usize), ) } @@ -732,13 +738,16 @@ impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. 
- pub fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { + /// TODO(EIP-7892): refactor this to remove `_current_fork` + pub fn max_responses(&self, _current_fork: ForkName, spec: &ChainSpec) -> u64 { match self { RequestType::Status(_) => 1, RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => req.max_blobs_requested(current_fork, spec), + RequestType::BlobsByRange(req) => { + req.max_blobs_requested(Slot::new(req.start_slot).epoch(E::slots_per_epoch()), spec) + } RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), @@ -754,7 +763,10 @@ impl RequestType { /// Gives the corresponding `SupportedProtocol` to this request. pub fn versioned_protocol(&self) -> SupportedProtocol { match self { - RequestType::Status(_) => SupportedProtocol::StatusV1, + RequestType::Status(req) => match req { + StatusMessage::V1(_) => SupportedProtocol::StatusV1, + StatusMessage::V2(_) => SupportedProtocol::StatusV2, + }, RequestType::Goodbye(_) => SupportedProtocol::GoodbyeV1, RequestType::BlocksByRange(req) => match req { OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1, @@ -813,10 +825,10 @@ impl RequestType { pub fn supported_protocols(&self) -> Vec { match self { // add more protocols when versions/encodings are supported - RequestType::Status(_) => vec![ProtocolId::new( - SupportedProtocol::StatusV1, - Encoding::SSZSnappy, - )], + RequestType::Status(_) => vec![ + ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::StatusV2, Encoding::SSZSnappy), + ], RequestType::Goodbye(_) => vec![ProtocolId::new( SupportedProtocol::GoodbyeV1, Encoding::SSZSnappy, diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs 
b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index f666c30d52..6e66999612 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -1,3 +1,5 @@ +#![deny(clippy::arithmetic_side_effects)] + use super::config::RateLimiterConfig; use crate::rpc::Protocol; use fnv::FnvHashMap; @@ -5,6 +7,7 @@ use libp2p::PeerId; use serde::{Deserialize, Serialize}; use std::future::Future; use std::hash::Hash; +use std::num::NonZeroU64; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; @@ -55,7 +58,7 @@ pub struct Quota { pub(super) replenish_all_every: Duration, /// Token limit. This translates on how large can an instantaneous batch of /// tokens be. - pub(super) max_tokens: u64, + pub(super) max_tokens: NonZeroU64, } impl Quota { @@ -63,12 +66,12 @@ impl Quota { pub const fn one_every(seconds: u64) -> Self { Quota { replenish_all_every: Duration::from_secs(seconds), - max_tokens: 1, + max_tokens: NonZeroU64::new(1).unwrap(), } } /// Allow `n` tokens to be use used every `seconds`. 
- pub const fn n_every(n: u64, seconds: u64) -> Self { + pub const fn n_every(n: NonZeroU64, seconds: u64) -> Self { Quota { replenish_all_every: Duration::from_secs(seconds), max_tokens: n, @@ -236,7 +239,9 @@ impl RPCRateLimiterBuilder { // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); - let prune_start = tokio::time::Instant::now() + prune_every; + let prune_start = tokio::time::Instant::now() + .checked_add(prune_every) + .ok_or("prune time overflow")?; let prune_interval = tokio::time::interval_at(prune_start, prune_every); Ok(RPCRateLimiter { prune_interval, @@ -412,14 +417,13 @@ pub struct Limiter { impl Limiter { pub fn from_quota(quota: Quota) -> Result { - if quota.max_tokens == 0 { - return Err("Max number of tokens should be positive"); - } let tau = quota.replenish_all_every.as_nanos(); if tau == 0 { return Err("Replenish time must be positive"); } - let t = (tau / quota.max_tokens as u128) + let t = tau + .checked_div(quota.max_tokens.get() as u128) + .expect("Division by zero never occurs, since Quota::max_token is of type NonZeroU64.") .try_into() .map_err(|_| "total replenish time is too long")?; let tau = tau @@ -442,7 +446,7 @@ impl Limiter { let tau = self.tau; let t = self.t; // how long does it take to replenish these tokens - let additional_time = t * tokens; + let additional_time = t.saturating_mul(tokens); if additional_time > tau { // the time required to process this amount of tokens is longer than the time that // makes the bucket full. 
So, this batch can _never_ be processed @@ -455,16 +459,16 @@ impl Limiter { .entry(key.clone()) .or_insert(time_since_start); // check how soon could the request be made - let earliest_time = (*tat + additional_time).saturating_sub(tau); + let earliest_time = (*tat).saturating_add(additional_time).saturating_sub(tau); // earliest_time is in the future if time_since_start < earliest_time { Err(RateLimitedErr::TooSoon(Duration::from_nanos( /* time they need to wait, i.e. how soon were they */ - earliest_time - time_since_start, + earliest_time.saturating_sub(time_since_start), ))) } else { // calculate the new TAT - *tat = time_since_start.max(*tat) + additional_time; + *tat = time_since_start.max(*tat).saturating_add(additional_time); Ok(()) } } @@ -479,14 +483,15 @@ impl Limiter { #[cfg(test)] mod tests { - use crate::rpc::rate_limiter::{Limiter, Quota}; + use crate::rpc::rate_limiter::{Limiter, Quota, RateLimitedErr}; + use std::num::NonZeroU64; use std::time::Duration; #[test] fn it_works_a() { let mut limiter = Limiter::from_quota(Quota { replenish_all_every: Duration::from_secs(2), - max_tokens: 4, + max_tokens: NonZeroU64::new(4).unwrap(), }) .unwrap(); let key = 10; @@ -523,7 +528,7 @@ mod tests { fn it_works_b() { let mut limiter = Limiter::from_quota(Quota { replenish_all_every: Duration::from_secs(2), - max_tokens: 4, + max_tokens: NonZeroU64::new(4).unwrap(), }) .unwrap(); let key = 10; @@ -547,4 +552,22 @@ mod tests { .allows(Duration::from_secs_f32(0.4), &key, 1) .is_err()); } + + #[test] + fn large_tokens() { + // These have been adjusted so that an overflow occurs when calculating `additional_time` in + // `Limiter::allows`. If we don't handle overflow properly, `Limiter::allows` returns `Ok` + // in this case. 
+ let replenish_all_every = 2; + let tokens = u64::MAX / 2 + 1; + + let mut limiter = Limiter::from_quota(Quota { + replenish_all_every: Duration::from_nanos(replenish_all_every), + max_tokens: NonZeroU64::new(1).unwrap(), + }) + .unwrap(); + + let result = limiter.allows(Duration::from_secs_f32(0.0), &10, tokens); + assert!(matches!(result, Err(RateLimitedErr::TooLarge))); + } } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index e5b685676f..f26dc4c7a8 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -316,6 +316,7 @@ mod tests { use crate::service::api_types::{AppRequestId, SingleLookupReqId, SyncRequestId}; use libp2p::PeerId; use logging::create_test_tracing_subscriber; + use std::num::NonZeroU64; use std::time::Duration; use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot}; @@ -324,7 +325,7 @@ mod tests { async fn test_next_peer_request_ready() { create_test_tracing_subscriber(); let config = OutboundRateLimiterConfig(RateLimiterConfig { - ping_quota: Quota::n_every(1, 2), + ping_quota: Quota::n_every(NonZeroU64::new(1).unwrap(), 2), ..Default::default() }); let fork_context = std::sync::Arc::new(ForkContext::new::( diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 23060df9e6..e2c6f24405 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -177,6 +177,7 @@ impl Network { pub async fn new( executor: task_executor::TaskExecutor, mut ctx: ServiceContext<'_>, + custody_group_count: u64, ) -> Result<(Self, Arc>), String> { let config = ctx.config.clone(); trace!("Libp2p Service starting"); @@ -201,11 +202,10 @@ impl Network { )?; // Construct the metadata - let custody_group_count = ctx.chain_spec.is_peer_das_scheduled().then(|| { - ctx.chain_spec - 
.custody_group_count(config.subscribe_all_data_column_subnets) - }); - let meta_data = utils::load_or_build_metadata(&config.network_dir, custody_group_count); + let advertised_cgc = config + .advertise_false_custody_group_count + .unwrap_or(custody_group_count); + let meta_data = utils::load_or_build_metadata(&config.network_dir, advertised_cgc); let seq_number = *meta_data.seq_number(); let globals = NetworkGlobals::new( enr, @@ -885,6 +885,23 @@ impl Network { } } + /// Subscribe to all data columns determined by the cgc. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] + pub fn subscribe_new_data_column_subnets(&mut self, custody_column_count: u64) { + self.network_globals + .update_data_column_subnets(custody_column_count); + + for column in self.network_globals.sampling_subnets() { + let kind = GossipKind::DataColumnSidecar(column); + self.subscribe_kind(kind); + } + } + /// Returns the scoring parameters for a topic if set. #[instrument(parent = None, level = "trace", @@ -1254,6 +1271,21 @@ impl Network { self.update_metadata_bitfields(); } + /// Updates the cgc value in the ENR. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] + pub fn update_enr_cgc(&mut self, new_custody_group_count: u64) { + if let Err(e) = self.discovery_mut().update_enr_cgc(new_custody_group_count) { + crit!(error = e, "Could not update cgc in ENR"); + } + // update the local meta data which informs our peers of the update during PINGS + self.update_metadata_cgc(new_custody_group_count); + } + /// Attempts to discover new peers for a given subnet. The `min_ttl` gives the time at which we /// would like to retain the peers for. 
#[instrument(parent = None, @@ -1368,6 +1400,28 @@ impl Network { utils::save_metadata_to_disk(&self.network_dir, meta_data); } + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] + fn update_metadata_cgc(&mut self, custody_group_count: u64) { + let mut meta_data_w = self.network_globals.local_metadata.write(); + + *meta_data_w.seq_number_mut() += 1; + if let Ok(cgc) = meta_data_w.custody_group_count_mut() { + *cgc = custody_group_count; + } + let seq_number = *meta_data_w.seq_number(); + let meta_data = meta_data_w.clone(); + + drop(meta_data_w); + self.eth2_rpc_mut().update_seq_number(seq_number); + // Save the updated metadata to disk + utils::save_metadata_to_disk(&self.network_dir, meta_data); + } + /// Sends a Ping request to the peer. #[instrument(parent = None, level = "trace", diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 01929bcb01..9a93936874 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -1,6 +1,5 @@ use crate::multiaddr::Protocol; -use crate::rpc::methods::MetaDataV3; -use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; +use crate::rpc::{MetaData, MetaDataV2, MetaDataV3}; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind}; use crate::{GossipTopic, NetworkConfig}; use futures::future::Either; @@ -165,38 +164,41 @@ pub fn strip_peer_id(addr: &mut Multiaddr) { /// Load metadata from persisted file. Return default metadata if loading fails. pub fn load_or_build_metadata( network_dir: &Path, - custody_group_count_opt: Option, + custody_group_count: u64, ) -> MetaData { - // We load a V2 metadata version by default (regardless of current fork) - // since a V2 metadata can be converted to V1. 
The RPC encoder is responsible + // We load a V3 metadata version by default (regardless of current fork) + // since a V3 metadata can be converted to V1 or V2. The RPC encoder is responsible // for sending the correct metadata version based on the negotiated protocol version. - let mut meta_data = MetaDataV2 { + let mut meta_data = MetaDataV3 { seq_number: 0, attnets: EnrAttestationBitfield::::default(), syncnets: EnrSyncCommitteeBitfield::::default(), + custody_group_count, }; + // Read metadata from persisted file if available let metadata_path = network_dir.join(METADATA_FILENAME); if let Ok(mut metadata_file) = File::open(metadata_path) { let mut metadata_ssz = Vec::new(); if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { - // Attempt to read a MetaDataV2 version from the persisted file, - // if that fails, read MetaDataV1 - match MetaDataV2::::from_ssz_bytes(&metadata_ssz) { + // Attempt to read a MetaDataV3 version from the persisted file, + // if that fails, read MetaDataV2 + match MetaDataV3::::from_ssz_bytes(&metadata_ssz) { Ok(persisted_metadata) => { meta_data.seq_number = persisted_metadata.seq_number; // Increment seq number if persisted attnet is not default if persisted_metadata.attnets != meta_data.attnets || persisted_metadata.syncnets != meta_data.syncnets + || persisted_metadata.custody_group_count != meta_data.custody_group_count { meta_data.seq_number += 1; } debug!("Loaded metadata from disk"); } Err(_) => { - match MetaDataV1::::from_ssz_bytes(&metadata_ssz) { + match MetaDataV2::::from_ssz_bytes(&metadata_ssz) { Ok(persisted_metadata) => { - let persisted_metadata = MetaData::V1(persisted_metadata); + let persisted_metadata = MetaData::V2(persisted_metadata); // Increment seq number as the persisted metadata version is updated meta_data.seq_number = *persisted_metadata.seq_number() + 1; debug!("Loaded metadata from disk"); @@ -213,19 +215,8 @@ pub fn load_or_build_metadata( } }; - // Wrap the MetaData - let meta_data = if let 
Some(custody_group_count) = custody_group_count_opt { - MetaData::V3(MetaDataV3 { - attnets: meta_data.attnets, - seq_number: meta_data.seq_number, - syncnets: meta_data.syncnets, - custody_group_count, - }) - } else { - MetaData::V2(meta_data) - }; - - debug!(seq_num = meta_data.seq_number(), "Metadata sequence number"); + debug!(seq_num = meta_data.seq_number, "Metadata sequence number"); + let meta_data = MetaData::V3(meta_data); save_metadata_to_disk(network_dir, meta_data.clone()); meta_data } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index fd99d93589..d1ed1c33b0 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -31,10 +31,8 @@ pub struct NetworkGlobals { /// The current state of the backfill sync. pub backfill_state: RwLock, /// The computed sampling subnets and columns is stored to avoid re-computing. - pub sampling_subnets: HashSet, - pub sampling_columns: HashSet, - /// Constant custody group count (CGC) set at startup - custody_group_count: u64, + pub sampling_subnets: RwLock>, + pub sampling_columns: RwLock>, /// Network-related configuration. Immutable after initialization. pub config: Arc, /// Ethereum chain configuration. Immutable after initialization. 
@@ -87,6 +85,13 @@ impl NetworkGlobals { sampling_columns.extend(columns); } + tracing::debug!( + cgc = custody_group_count, + ?sampling_columns, + ?sampling_subnets, + "Starting node with custody params" + ); + NetworkGlobals { local_enr: RwLock::new(enr.clone()), peer_id: RwLock::new(enr.peer_id()), @@ -96,14 +101,40 @@ impl NetworkGlobals { gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::Paused), - sampling_subnets, - sampling_columns, - custody_group_count, + sampling_subnets: RwLock::new(sampling_subnets), + sampling_columns: RwLock::new(sampling_columns), config, spec, } } + /// Update the sampling subnets based on an updated cgc. + pub fn update_data_column_subnets(&self, custody_group_count: u64) { + // The below `expect` calls will panic on start up if the chain spec config values used + // are invalid + let sampling_size = self + .spec + .sampling_size(custody_group_count) + .expect("should compute node sampling size from valid chain spec"); + let custody_groups = + get_custody_groups(self.local_enr().node_id().raw(), sampling_size, &self.spec) + .expect("should compute node custody groups"); + + let mut sampling_subnets = self.sampling_subnets.write(); + for custody_index in &custody_groups { + let subnets = compute_subnets_from_custody_group(*custody_index, &self.spec) + .expect("should compute custody subnets for node"); + sampling_subnets.extend(subnets); + } + + let mut sampling_columns = self.sampling_columns.write(); + for custody_index in &custody_groups { + let columns = compute_columns_for_custody_group(*custody_index, &self.spec) + .expect("should compute custody columns for node"); + sampling_columns.extend(columns); + } + } + /// Returns the local ENR from the underlying Discv5 behaviour that external peers may connect /// to. 
pub fn local_enr(&self) -> Enr { @@ -120,19 +151,6 @@ impl NetworkGlobals { self.listen_multiaddrs.read().clone() } - /// Returns true if this node is configured as a PeerDAS supernode - pub fn is_supernode(&self) -> bool { - self.custody_group_count == self.spec.number_of_custody_groups - } - - /// Returns the count of custody columns this node must sample for block import - pub fn custody_columns_count(&self) -> u64 { - // This only panics if the chain spec contains invalid values - self.spec - .sampling_size(self.custody_group_count) - .expect("should compute node sampling size from valid chain spec") - } - /// Returns the number of libp2p connected peers. pub fn connected_peers(&self) -> usize { self.peers.read().connected_peer_ids().count() @@ -226,10 +244,18 @@ impl NetworkGlobals { enable_light_client_server: self.config.enable_light_client_server, subscribe_all_subnets: self.config.subscribe_all_subnets, subscribe_all_data_column_subnets: self.config.subscribe_all_data_column_subnets, - sampling_subnets: &self.sampling_subnets, + sampling_subnets: self.sampling_subnets.read().clone(), } } + pub fn sampling_columns(&self) -> HashSet { + self.sampling_columns.read().clone() + } + + pub fn sampling_subnets(&self) -> HashSet { + self.sampling_subnets.read().clone() + } + /// TESTING ONLY. Build a dummy NetworkGlobals instance. 
pub fn new_test_globals( trusted_peers: Vec, @@ -283,7 +309,7 @@ mod test { Arc::new(spec), ); assert_eq!( - globals.sampling_subnets.len(), + globals.sampling_subnets.read().len(), subnet_sampling_size as usize ); } @@ -306,7 +332,7 @@ mod test { Arc::new(spec), ); assert_eq!( - globals.sampling_columns.len(), + globals.sampling_columns.read().len(), subnet_sampling_size as usize ); } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 880b387250..21df75a648 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -7,8 +7,8 @@ use ssz::{Decode, Encode}; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttestationBase, AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, - BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName, + AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, BlobSidecar, + DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, @@ -27,10 +27,8 @@ pub enum PubsubMessage { DataColumnSidecar(Box<(DataColumnSubnetId, Arc>)>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), - /// Gossipsub message providing notification of a raw un-aggregated attestation with its subnet id. - Attestation(Box<(SubnetId, Attestation)>), - /// Gossipsub message providing notification of a `SingleAttestation`` with its subnet id. - SingleAttestation(Box<(SubnetId, SingleAttestation)>), + /// Gossipsub message providing notification of a `SingleAttestation` with its subnet id. 
+ Attestation(Box<(SubnetId, SingleAttestation)>), /// Gossipsub message providing notification of a voluntary exit. VoluntaryExit(Box), /// Gossipsub message providing notification of a new proposer slashing. @@ -140,9 +138,6 @@ impl PubsubMessage { PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) } - PubsubMessage::SingleAttestation(attestation_data) => { - GossipKind::Attestation(attestation_data.0) - } PubsubMessage::VoluntaryExit(_) => GossipKind::VoluntaryExit, PubsubMessage::ProposerSlashing(_) => GossipKind::ProposerSlashing, PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, @@ -203,32 +198,12 @@ impl PubsubMessage { ))) } GossipKind::Attestation(subnet_id) => { - match fork_context.from_context_bytes(gossip_topic.fork_digest) { - Some(&fork_name) => { - if fork_name.electra_enabled() { - let single_attestation = - SingleAttestation::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?; - Ok(PubsubMessage::SingleAttestation(Box::new(( - *subnet_id, - single_attestation, - )))) - } else { - let attestation = Attestation::Base( - AttestationBase::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ); - Ok(PubsubMessage::Attestation(Box::new(( - *subnet_id, - attestation, - )))) - } - } - None => Err(format!( - "Unknown gossipsub fork digest: {:?}", - gossip_topic.fork_digest - )), - } + let attestation = SingleAttestation::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::Attestation(Box::new(( + *subnet_id, + attestation, + )))) } GossipKind::BeaconBlock => { let beacon_block = @@ -418,7 +393,6 @@ impl PubsubMessage { PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), PubsubMessage::AttesterSlashing(data) => data.as_ssz_bytes(), PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(), - PubsubMessage::SingleAttestation(data) => data.1.as_ssz_bytes(), PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(), 
PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(), PubsubMessage::BlsToExecutionChange(data) => data.as_ssz_bytes(), @@ -457,13 +431,6 @@ impl std::fmt::Display for PubsubMessage { att.message().aggregator_index(), ), PubsubMessage::Attestation(data) => write!( - f, - "Attestation: subnet_id: {}, attestation_slot: {}, attestation_index: {:?}", - *data.0, - data.1.data().slot, - data.1.committee_index(), - ), - PubsubMessage::SingleAttestation(data) => write!( f, "SingleAttestation: subnet_id: {}, attestation_slot: {}, committee_index: {:?}, attester_index: {:?}", *data.0, diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 56b97303d3..349bfe66a3 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -26,11 +26,11 @@ pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; #[derive(Debug)] -pub struct TopicConfig<'a> { +pub struct TopicConfig { pub enable_light_client_server: bool, pub subscribe_all_subnets: bool, pub subscribe_all_data_column_subnets: bool, - pub sampling_subnets: &'a HashSet, + pub sampling_subnets: HashSet, } /// Returns all the topics the node should subscribe at `fork_name` @@ -85,7 +85,7 @@ pub fn core_topics_to_subscribe( topics.push(GossipKind::DataColumnSidecar(i.into())); } } else { - for subnet in opts.sampling_subnets { + for subnet in &opts.sampling_subnets { topics.push(GossipKind::DataColumnSidecar(*subnet)); } } @@ -126,7 +126,7 @@ pub fn all_topics_at_fork(fork: ForkName, spec: &ChainSpec) -> Vec(fork, &opts, spec) } @@ -521,7 +521,7 @@ mod tests { enable_light_client_server: false, subscribe_all_subnets: false, subscribe_all_data_column_subnets: false, - sampling_subnets, + sampling_subnets: sampling_subnets.clone(), } } diff --git 
a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index d979ef9265..0dac126909 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -118,6 +118,7 @@ pub async fn build_libp2p_instance( let (signal, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, shutdown_tx, service_name); + let custody_group_count = chain_spec.custody_requirement; let libp2p_context = lighthouse_network::Context { config, enr_fork_id: EnrForkId::default(), @@ -126,7 +127,7 @@ pub async fn build_libp2p_instance( libp2p_registry: None, }; Libp2pInstance( - LibP2PService::new(executor, libp2p_context) + LibP2PService::new(executor, libp2p_context, custody_group_count) .await .expect("should build libp2p instance") .0, diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 72d7aa0074..e50f70e43a 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -75,22 +75,22 @@ fn test_tcp_status_rpc() { .await; // Dummy STATUS RPC message - let rpc_request = RequestType::Status(StatusMessage { + let rpc_request = RequestType::Status(StatusMessage::V1(StatusMessageV1 { fork_digest: [0; 4], finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), head_root: Hash256::zero(), head_slot: Slot::new(1), - }); + })); // Dummy STATUS RPC message - let rpc_response = Response::Status(StatusMessage { + let rpc_response = Response::Status(StatusMessage::V1(StatusMessageV1 { fork_digest: [0; 4], finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), head_root: Hash256::zero(), head_slot: Slot::new(1), - }); + })); // build the sender future let sender_future = async { @@ -1199,22 +1199,22 @@ fn test_delayed_rpc_response() { .await; // Dummy STATUS RPC message - let 
rpc_request = RequestType::Status(StatusMessage { + let rpc_request = RequestType::Status(StatusMessage::V1(StatusMessageV1 { fork_digest: [0; 4], finalized_root: Hash256::from_low_u64_be(0), finalized_epoch: Epoch::new(1), head_root: Hash256::from_low_u64_be(0), head_slot: Slot::new(1), - }); + })); // Dummy STATUS RPC message - let rpc_response = Response::Status(StatusMessage { + let rpc_response = Response::Status(StatusMessage::V1(StatusMessageV1 { fork_digest: [0; 4], finalized_root: Hash256::from_low_u64_be(0), finalized_epoch: Epoch::new(1), head_root: Hash256::from_low_u64_be(0), head_slot: Slot::new(1), - }); + })); // build the sender future let sender_future = async { @@ -1246,10 +1246,12 @@ fn test_delayed_rpc_response() { // The second and subsequent responses are delayed due to the response rate-limiter on the receiver side. // Adding a slight margin to the elapsed time check to account for potential timing issues caused by system // scheduling or execution delays during testing. + // https://github.com/sigp/lighthouse/issues/7466 + let margin = 500; assert!( request_sent_at.elapsed() > (Duration::from_secs(QUOTA_SEC) - - Duration::from_millis(100)) + - Duration::from_millis(margin)) ); if request_id == 5 { // End the test @@ -1327,22 +1329,22 @@ fn test_active_requests() { .await; // Dummy STATUS RPC request. - let rpc_request = RequestType::Status(StatusMessage { + let rpc_request = RequestType::Status(StatusMessage::V1(StatusMessageV1 { fork_digest: [0; 4], finalized_root: Hash256::from_low_u64_be(0), finalized_epoch: Epoch::new(1), head_root: Hash256::from_low_u64_be(0), head_slot: Slot::new(1), - }); + })); // Dummy STATUS RPC response. - let rpc_response = Response::Status(StatusMessage { + let rpc_response = Response::Status(StatusMessage::V1(StatusMessageV1 { fork_digest: [0; 4], finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), head_root: Hash256::zero(), head_slot: Slot::new(1), - }); + })); // Number of requests. 
const REQUESTS: u8 = 10; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index b129b54841..05c7dc287b 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -780,7 +780,7 @@ pub fn update_sync_metrics(network_globals: &Arc>) let all_column_subnets = (0..network_globals.spec.data_column_sidecar_subnet_count).map(DataColumnSubnetId::new); - let custody_column_subnets = network_globals.sampling_subnets.iter(); + let custody_column_subnets = network_globals.sampling_subnets(); // Iterate all subnet values to set to zero the empty entries in peers_per_column_subnet for subnet in all_column_subnets { @@ -794,7 +794,7 @@ pub fn update_sync_metrics(network_globals: &Arc>) // Registering this metric is a duplicate for supernodes but helpful for fullnodes. This way // operators can monitor the health of only the subnets of their interest without complex // Grafana queries. - for subnet in custody_column_subnets { + for subnet in custody_column_subnets.iter() { set_gauge_entry( &PEERS_PER_CUSTODY_COLUMN_SUBNET, &[&format!("{subnet}")], diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 638f9e4824..6bdcd02197 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -14,12 +14,12 @@ use beacon_chain::{ light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, observed_operations::ObservationOutcome, - single_attestation::single_attestation_to_attestation, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, 
NotifyExecutionLayer, }; +use beacon_processor::{Work, WorkEvent}; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use logging::crit; use operation_pool::ReceivedPreCapella; @@ -31,7 +31,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; -use tokio::sync::mpsc; +use tokio::sync::mpsc::error::TrySendError; use tracing::{debug, error, info, trace, warn}; use types::{ beacon_block::BlockImportSource, Attestation, AttestationData, AttestationRef, @@ -42,6 +42,7 @@ use types::{ SyncCommitteeMessage, SyncSubnetId, }; +use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction; use beacon_processor::{ work_reprocessing_queue::{ QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, @@ -66,7 +67,7 @@ struct VerifiedUnaggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. impl VerifiedAttestation for VerifiedUnaggregate { - fn attestation(&self) -> AttestationRef { + fn attestation(&self) -> AttestationRef<'_, T::EthSpec> { self.attestation.to_ref() } @@ -82,8 +83,8 @@ impl VerifiedAttestation for VerifiedUnaggregate { } /// An attestation that failed validation by the `BeaconChain`. -struct RejectedUnaggregate { - attestation: Box>, +struct RejectedUnaggregate { + attestation: Box, error: AttnError, } @@ -99,7 +100,7 @@ struct VerifiedAggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. impl VerifiedAttestation for VerifiedAggregate { - fn attestation(&self) -> AttestationRef { + fn attestation(&self) -> AttestationRef<'_, T::EthSpec> { self.signed_aggregate.message().aggregate() } @@ -124,16 +125,11 @@ struct RejectedAggregate { /// Data for an aggregated or unaggregated attestation that failed verification. 
enum FailedAtt { Unaggregate { - attestation: Box>, + attestation: Box, subnet_id: SubnetId, should_import: bool, seen_timestamp: Duration, }, - // This variant is just a dummy variant for now, as SingleAttestation reprocessing is handled - // separately. - SingleUnaggregate { - attestation: Box, - }, Aggregate { attestation: Box>, seen_timestamp: Duration, @@ -148,15 +144,13 @@ impl FailedAtt { pub fn kind(&self) -> &'static str { match self { FailedAtt::Unaggregate { .. } => "unaggregated", - FailedAtt::SingleUnaggregate { .. } => "unaggregated", FailedAtt::Aggregate { .. } => "aggregated", } } pub fn attestation_data(&self) -> &AttestationData { match self { - FailedAtt::Unaggregate { attestation, .. } => attestation.data(), - FailedAtt::SingleUnaggregate { attestation, .. } => &attestation.data, + FailedAtt::Unaggregate { attestation, .. } => &attestation.data, FailedAtt::Aggregate { attestation, .. } => attestation.message().aggregate().data(), } } @@ -208,20 +202,24 @@ impl NetworkBeaconProcessor { self: Arc, message_id: MessageId, peer_id: PeerId, - attestation: Box>, + attestation: Box, subnet_id: SubnetId, should_import: bool, - reprocess_tx: Option>, + allow_reprocess: bool, seen_timestamp: Duration, ) { let result = match self .chain .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)) { - Ok(verified_attestation) => Ok(VerifiedUnaggregate { - indexed_attestation: verified_attestation.into_indexed_attestation(), - attestation, - }), + Ok(verified_attestation) => { + let attestation = + Box::new(verified_attestation.attestation().clone_as_attestation()); + Ok(VerifiedUnaggregate { + indexed_attestation: verified_attestation.into_indexed_attestation(), + attestation, + }) + } Err(error) => Err(RejectedUnaggregate { attestation, error }), }; @@ -230,7 +228,7 @@ impl NetworkBeaconProcessor { message_id, peer_id, subnet_id, - reprocess_tx, + allow_reprocess, should_import, seen_timestamp, ); @@ -238,8 +236,8 @@ impl 
NetworkBeaconProcessor { pub fn process_gossip_attestation_batch( self: Arc, - packages: GossipAttestationBatch, - reprocess_tx: Option>, + packages: GossipAttestationBatch, + allow_reprocess: bool, ) { let attestations_and_subnets = packages .iter() @@ -275,14 +273,19 @@ impl NetworkBeaconProcessor { #[allow(clippy::needless_collect)] // The clippy suggestion fails the borrow checker. let results = results .into_iter() - .map(|result| result.map(|verified| verified.into_indexed_attestation())) + .map(|result| { + result.map(|verified| { + let attestation = verified.attestation().clone_as_attestation(); + (verified.into_indexed_attestation(), attestation) + }) + }) .collect::>(); for (result, package) in results.into_iter().zip(packages.into_iter()) { let result = match result { - Ok(indexed_attestation) => Ok(VerifiedUnaggregate { + Ok((indexed_attestation, attestation)) => Ok(VerifiedUnaggregate { indexed_attestation, - attestation: package.attestation, + attestation: Box::new(attestation), }), Err(error) => Err(RejectedUnaggregate { attestation: package.attestation, @@ -295,7 +298,7 @@ impl NetworkBeaconProcessor { package.message_id, package.peer_id, package.subnet_id, - reprocess_tx.clone(), + allow_reprocess, package.should_import, package.seen_timestamp, ); @@ -307,11 +310,11 @@ impl NetworkBeaconProcessor { #[allow(clippy::too_many_arguments)] fn process_gossip_attestation_result( self: &Arc, - result: Result, RejectedUnaggregate>, + result: Result, RejectedUnaggregate>, message_id: MessageId, peer_id: PeerId, subnet_id: SubnetId, - reprocess_tx: Option>, + allow_reprocess: bool, should_import: bool, seen_timestamp: Duration, ) { @@ -395,7 +398,7 @@ impl NetworkBeaconProcessor { should_import, seen_timestamp, }, - reprocess_tx, + allow_reprocess, error, seen_timestamp, ); @@ -403,147 +406,6 @@ impl NetworkBeaconProcessor { } } - /// Process an unaggregated attestation requiring conversion. 
- /// - /// This function performs the conversion, and if successfull queues a new message to be - /// processed by `process_gossip_attestation`. If unsuccessful due to block unavailability, - /// a retry message will be pushed to the `reprocess_tx` if it is `Some`. - #[allow(clippy::too_many_arguments)] - pub fn process_gossip_attestation_to_convert( - self: Arc, - message_id: MessageId, - peer_id: PeerId, - single_attestation: Box, - subnet_id: SubnetId, - should_import: bool, - reprocess_tx: Option>, - seen_timestamp: Duration, - ) { - let conversion_result = self.chain.with_committee_cache( - single_attestation.data.target.root, - single_attestation - .data - .slot - .epoch(T::EthSpec::slots_per_epoch()), - |committee_cache, _| { - let slot = single_attestation.data.slot; - let committee_index = single_attestation.committee_index; - let Some(committee) = committee_cache.get_beacon_committee(slot, committee_index) - else { - return Ok(Err(AttnError::NoCommitteeForSlotAndIndex { - slot, - index: committee_index, - })); - }; - - Ok(single_attestation_to_attestation( - &single_attestation, - committee.committee, - )) - }, - ); - - match conversion_result { - Ok(Ok(attestation)) => { - let slot = attestation.data().slot; - if let Err(e) = self.send_unaggregated_attestation( - message_id.clone(), - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - ) { - error!( - error = %e, - %slot, - "Unable to queue converted SingleAttestation" - ); - self.propagate_validation_result( - message_id, - peer_id, - MessageAcceptance::Ignore, - ); - } - } - // Outermost error (from `with_committee_cache`) indicating that the block is not known - // and that this conversion should be retried. 
- Err(BeaconChainError::MissingBeaconBlock(beacon_block_root)) => { - if let Some(sender) = reprocess_tx { - metrics::inc_counter( - &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_REQUEUED_TOTAL, - ); - // We don't know the block, get the sync manager to handle the block lookup, and - // send the attestation to be scheduled for re-processing. - self.sync_tx - .send(SyncMessage::UnknownBlockHashFromAttestation( - peer_id, - beacon_block_root, - )) - .unwrap_or_else(|_| { - warn!(msg = "UnknownBlockHash", "Failed to send to sync service") - }); - let processor = self.clone(); - // Do not allow this attestation to be re-processed beyond this point. - let reprocess_msg = - ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate { - beacon_block_root, - process_fn: Box::new(move || { - processor.process_gossip_attestation_to_convert( - message_id, - peer_id, - single_attestation, - subnet_id, - should_import, - None, - seen_timestamp, - ) - }), - }); - if sender.try_send(reprocess_msg).is_err() { - error!("Failed to send attestation for re-processing") - } - } else { - // We shouldn't make any further attempts to process this attestation. - // - // Don't downscore the peer since it's not clear if we requested this head - // block from them or not. - self.propagate_validation_result( - message_id, - peer_id, - MessageAcceptance::Ignore, - ); - } - } - Ok(Err(error)) => { - // We already handled reprocessing above so do not attempt it in the error handler. - self.handle_attestation_verification_failure( - peer_id, - message_id, - FailedAtt::SingleUnaggregate { - attestation: single_attestation, - }, - None, - error, - seen_timestamp, - ); - } - Err(error) => { - // We already handled reprocessing above so do not attempt it in the error handler. 
- self.handle_attestation_verification_failure( - peer_id, - message_id, - FailedAtt::SingleUnaggregate { - attestation: single_attestation, - }, - None, - AttnError::BeaconChainError(Box::new(error)), - seen_timestamp, - ); - } - } - } - /// Process the aggregated attestation received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. @@ -556,7 +418,7 @@ impl NetworkBeaconProcessor { message_id: MessageId, peer_id: PeerId, aggregate: Box>, - reprocess_tx: Option>, + allow_reprocess: bool, seen_timestamp: Duration, ) { let beacon_block_root = aggregate.message().aggregate().data().beacon_block_root; @@ -580,7 +442,7 @@ impl NetworkBeaconProcessor { beacon_block_root, message_id, peer_id, - reprocess_tx, + allow_reprocess, seen_timestamp, ); } @@ -588,7 +450,7 @@ impl NetworkBeaconProcessor { pub fn process_gossip_aggregate_batch( self: Arc, packages: Vec>, - reprocess_tx: Option>, + allow_reprocess: bool, ) { let aggregates = packages.iter().map(|package| package.aggregate.as_ref()); @@ -642,7 +504,7 @@ impl NetworkBeaconProcessor { package.beacon_block_root, package.message_id, package.peer_id, - reprocess_tx.clone(), + allow_reprocess, package.seen_timestamp, ); } @@ -654,7 +516,7 @@ impl NetworkBeaconProcessor { beacon_block_root: Hash256, message_id: MessageId, peer_id: PeerId, - reprocess_tx: Option>, + allow_reprocess: bool, seen_timestamp: Duration, ) { match result { @@ -733,7 +595,7 @@ impl NetworkBeaconProcessor { attestation: signed_aggregate, seen_timestamp, }, - reprocess_tx, + allow_reprocess, error, seen_timestamp, ); @@ -797,6 +659,19 @@ impl NetworkBeaconProcessor { } Err(err) => { match err { + GossipDataColumnError::PriorKnownUnpublished => { + debug!( + %slot, + %block_root, + %index, + "Gossip data column already processed via the EL. Accepting the column sidecar without re-processing." 
+ ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Accept, + ); + } GossipDataColumnError::ParentUnknown { parent_root } => { debug!( action = "requesting parent", @@ -1160,8 +1035,35 @@ impl NetworkBeaconProcessor { "Processed data column, waiting for other components" ); - self.attempt_data_column_reconstruction(block_root, true) - .await; + // Instead of triggering reconstruction immediately, schedule it to be run. If + // another column arrives it either completes availability or pushes + // reconstruction back a bit. + let cloned_self = Arc::clone(self); + let send_result = self.beacon_processor_send.try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(ReprocessQueueMessage::DelayColumnReconstruction( + QueuedColumnReconstruction { + block_root, + process_fn: Box::pin(async move { + cloned_self + .attempt_data_column_reconstruction(block_root, true) + .await; + }), + }, + )), + }); + if let Err(TrySendError::Full(WorkEvent { + work: + Work::Reprocess(ReprocessQueueMessage::DelayColumnReconstruction( + reconstruction, + )), + .. + })) = send_result + { + warn!("Unable to send reconstruction to reprocessing"); + // Execute it immediately instead. 
+ reconstruction.process_fn.await; + } } }, Err(BlockError::DuplicateFullyImported(_)) => { @@ -1201,7 +1103,6 @@ impl NetworkBeaconProcessor { peer_id: PeerId, peer_client: Client, block: Arc>, - reprocess_tx: mpsc::Sender, duplicate_cache: DuplicateCache, invalid_block_storage: InvalidBlockStorage, seen_duration: Duration, @@ -1212,7 +1113,6 @@ impl NetworkBeaconProcessor { peer_id, peer_client, block.clone(), - reprocess_tx.clone(), seen_duration, ) .await @@ -1223,7 +1123,6 @@ impl NetworkBeaconProcessor { self.process_gossip_verified_block( peer_id, gossip_verified_block, - reprocess_tx, invalid_block_storage, seen_duration, ) @@ -1249,7 +1148,6 @@ impl NetworkBeaconProcessor { peer_id: PeerId, peer_client: Client, block: Arc>, - reprocess_tx: mpsc::Sender, seen_duration: Duration, ) -> Option> { let block_delay = @@ -1259,10 +1157,7 @@ impl NetworkBeaconProcessor { let verification_result = self .chain .clone() - .verify_block_for_gossip( - block.clone(), - self.network_globals.custody_columns_count() as usize, - ) + .verify_block_for_gossip(block.clone()) .await; if verification_result.is_ok() { @@ -1409,7 +1304,8 @@ impl NetworkBeaconProcessor { | Err(e @ BlockError::ExecutionPayloadError(_)) | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) | Err(e @ BlockError::KnownInvalidExecutionPayload(_)) - | Err(e @ BlockError::GenesisBlock) => { + | Err(e @ BlockError::GenesisBlock) + | Err(e @ BlockError::InvalidBlobCount { .. }) => { warn!(error = %e, "Could not verify block for gossip. 
Rejecting the block"); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -1479,24 +1375,28 @@ impl NetworkBeaconProcessor { let inner_self = self.clone(); let process_fn = Box::pin(async move { - let reprocess_tx = inner_self.reprocess_tx.clone(); let invalid_block_storage = inner_self.invalid_block_storage.clone(); inner_self .process_gossip_verified_block( peer_id, verified_block, - reprocess_tx, invalid_block_storage, seen_duration, ) .await; }); - if reprocess_tx - .try_send(ReprocessQueueMessage::EarlyBlock(QueuedGossipBlock { - beacon_block_slot: block_slot, - beacon_block_root: block_root, - process_fn, - })) + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(ReprocessQueueMessage::EarlyBlock( + QueuedGossipBlock { + beacon_block_slot: block_slot, + beacon_block_root: block_root, + process_fn, + }, + )), + }) .is_err() { error!( @@ -1529,7 +1429,6 @@ impl NetworkBeaconProcessor { self: Arc, peer_id: PeerId, verified_block: GossipVerifiedBlock, - reprocess_tx: mpsc::Sender, invalid_block_storage: InvalidBlockStorage, _seen_duration: Duration, ) { @@ -1579,10 +1478,14 @@ impl NetworkBeaconProcessor { match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - if reprocess_tx - .try_send(ReprocessQueueMessage::BlockImported { - block_root: *block_root, - parent_root: block.message().parent_root(), + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(ReprocessQueueMessage::BlockImported { + block_root: *block_root, + parent_root: block.message().parent_root(), + }), }) .is_err() { @@ -2107,7 +2010,7 @@ impl NetworkBeaconProcessor { message_id: MessageId, peer_id: PeerId, light_client_optimistic_update: LightClientOptimisticUpdate, - reprocess_tx: Option>, + allow_reprocess: bool, seen_timestamp: Duration, ) { match self.chain.verify_optimistic_update_for_gossip( @@ 
-2135,7 +2038,7 @@ impl NetworkBeaconProcessor { "Optimistic update for unknown block" ); - if let Some(sender) = reprocess_tx { + if allow_reprocess { let processor = self.clone(); let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate( QueuedLightClientUpdate { @@ -2145,14 +2048,21 @@ impl NetworkBeaconProcessor { message_id, peer_id, light_client_optimistic_update, - None, // Do not reprocess this message again. + false, // Do not reprocess this message again. seen_timestamp, ) }), }, ); - if sender.try_send(msg).is_err() { + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: true, + work: Work::Reprocess(msg), + }) + .is_err() + { error!("Failed to send optimistic update for re-processing") } } else { @@ -2222,7 +2132,7 @@ impl NetworkBeaconProcessor { peer_id: PeerId, message_id: MessageId, failed_att: FailedAtt, - reprocess_tx: Option>, + allow_reprocess: bool, error: AttnError, seen_timestamp: Duration, ) { @@ -2462,7 +2372,7 @@ impl NetworkBeaconProcessor { block = ?beacon_block_root, "Attestation for unknown block" ); - if let Some(sender) = reprocess_tx { + if allow_reprocess { // We don't know the block, get the sync manager to handle the block lookup, and // send the attestation to be scheduled for re-processing. self.sync_tx @@ -2489,22 +2399,12 @@ impl NetworkBeaconProcessor { message_id, peer_id, attestation, - None, // Do not allow this attestation to be re-processed beyond this point. + false, // Do not allow this attestation to be re-processed beyond this point. seen_timestamp, ) }), }) } - FailedAtt::SingleUnaggregate { .. } => { - // This should never happen, as we handle the unknown head block case - // for `SingleAttestation`s separately and should not be able to hit - // an `UnknownHeadBlock` error. 
- error!( - block_root = ?beacon_block_root, - "Dropping SingleAttestation instead of requeueing" - ); - return; - } FailedAtt::Unaggregate { attestation, subnet_id, @@ -2524,7 +2424,7 @@ impl NetworkBeaconProcessor { attestation, subnet_id, should_import, - None, // Do not allow this attestation to be re-processed beyond this point. + false, // Do not allow this attestation to be re-processed beyond this point. seen_timestamp, ) }), @@ -2532,7 +2432,14 @@ impl NetworkBeaconProcessor { } }; - if sender.try_send(msg).is_err() { + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(msg), + }) + .is_err() + { error!("Failed to send attestation for re-processing") } } else { @@ -2600,19 +2507,6 @@ impl NetworkBeaconProcessor { "attn_no_committee", ); } - AttnError::NotExactlyOneAggregationBitSet(_) => { - /* - * The unaggregated attestation doesn't have only one signature. - * - * The peer has published an invalid consensus message. - */ - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer( - peer_id, - PeerAction::LowToleranceError, - "attn_too_many_agg_bits", - ); - } AttnError::NotExactlyOneCommitteeBitSet(_) => { /* * The attestation doesn't have only one committee bit set. @@ -2762,6 +2656,26 @@ impl NetworkBeaconProcessor { MessageAcceptance::Ignore, ); } + BeaconChainError::AttestationValidationError(e) => { + // Failures from `get_attesting_indices` end up here. + debug!( + %peer_id, + block_root = ?beacon_block_root, + attestation_slot = %failed_att.attestation_data().slot, + error = ?e, + "Rejecting attestation that failed validation" + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "attn_validation_error", + ); + } _ => { /* * Lighthouse hit an unexpected error whilst processing the attestation. 
It diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index ba681eed14..f7c3a1bf8d 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -5,15 +5,15 @@ use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::data_column_verification::{observe_gossip_data_column, GossipDataColumnError}; use beacon_chain::fetch_blobs::{ - fetch_and_process_engine_blobs, BlobsOrDataColumns, FetchEngineBlobError, + fetch_and_process_engine_blobs, EngineGetBlobsOutput, FetchEngineBlobError, }; use beacon_chain::observed_data_sidecars::DoNotObserve; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, NotifyExecutionLayer, }; use beacon_processor::{ - work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, - GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, + BeaconProcessorSend, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, + WorkEvent as BeaconWorkEvent, }; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, @@ -61,7 +61,6 @@ pub struct NetworkBeaconProcessor { pub chain: Arc>, pub network_tx: mpsc::UnboundedSender>, pub sync_tx: mpsc::UnboundedSender>, - pub reprocess_tx: mpsc::Sender, pub network_globals: Arc>, pub invalid_block_storage: InvalidBlockStorage, pub executor: TaskExecutor, @@ -75,78 +74,34 @@ impl NetworkBeaconProcessor { self.beacon_processor_send.try_send(event) } - /// Create a new `Work` event for some `SingleAttestation`. 
- pub fn send_single_attestation( - self: &Arc, - message_id: MessageId, - peer_id: PeerId, - single_attestation: SingleAttestation, - subnet_id: SubnetId, - should_import: bool, - seen_timestamp: Duration, - ) -> Result<(), Error> { - let processor = self.clone(); - let process_individual = move |package: GossipAttestationPackage| { - let reprocess_tx = processor.reprocess_tx.clone(); - processor.process_gossip_attestation_to_convert( - package.message_id, - package.peer_id, - package.attestation, - package.subnet_id, - package.should_import, - Some(reprocess_tx), - package.seen_timestamp, - ) - }; - - self.try_send(BeaconWorkEvent { - drop_during_sync: true, - work: Work::GossipAttestationToConvert { - attestation: Box::new(GossipAttestationPackage { - message_id, - peer_id, - attestation: Box::new(single_attestation), - subnet_id, - should_import, - seen_timestamp, - }), - process_individual: Box::new(process_individual), - }, - }) - } - /// Create a new `Work` event for some unaggregated attestation. pub fn send_unaggregated_attestation( self: &Arc, message_id: MessageId, peer_id: PeerId, - attestation: Attestation, + attestation: SingleAttestation, subnet_id: SubnetId, should_import: bool, seen_timestamp: Duration, ) -> Result<(), Error> { // Define a closure for processing individual attestations. 
let processor = self.clone(); - let process_individual = - move |package: GossipAttestationPackage>| { - let reprocess_tx = processor.reprocess_tx.clone(); - processor.process_gossip_attestation( - package.message_id, - package.peer_id, - package.attestation, - package.subnet_id, - package.should_import, - Some(reprocess_tx), - package.seen_timestamp, - ) - }; + let process_individual = move |package: GossipAttestationPackage| { + processor.process_gossip_attestation( + package.message_id, + package.peer_id, + package.attestation, + package.subnet_id, + package.should_import, + true, + package.seen_timestamp, + ) + }; // Define a closure for processing batches of attestations. let processor = self.clone(); - let process_batch = move |attestations| { - let reprocess_tx = processor.reprocess_tx.clone(); - processor.process_gossip_attestation_batch(attestations, Some(reprocess_tx)) - }; + let process_batch = + move |attestations| processor.process_gossip_attestation_batch(attestations, true); self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -176,22 +131,19 @@ impl NetworkBeaconProcessor { // Define a closure for processing individual attestations. let processor = self.clone(); let process_individual = move |package: GossipAggregatePackage| { - let reprocess_tx = processor.reprocess_tx.clone(); processor.process_gossip_aggregate( package.message_id, package.peer_id, package.aggregate, - Some(reprocess_tx), + true, package.seen_timestamp, ) }; // Define a closure for processing batches of attestations. 
let processor = self.clone(); - let process_batch = move |aggregates| { - let reprocess_tx = processor.reprocess_tx.clone(); - processor.process_gossip_aggregate_batch(aggregates, Some(reprocess_tx)) - }; + let process_batch = + move |aggregates| processor.process_gossip_aggregate_batch(aggregates, true); let beacon_block_root = aggregate.message().aggregate().data().beacon_block_root; self.try_send(BeaconWorkEvent { @@ -221,7 +173,6 @@ impl NetworkBeaconProcessor { ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { - let reprocess_tx = processor.reprocess_tx.clone(); let invalid_block_storage = processor.invalid_block_storage.clone(); let duplicate_cache = processor.duplicate_cache.clone(); processor @@ -230,7 +181,6 @@ impl NetworkBeaconProcessor { peer_id, peer_client, block, - reprocess_tx, duplicate_cache, invalid_block_storage, seen_timestamp, @@ -423,12 +373,11 @@ impl NetworkBeaconProcessor { ) -> Result<(), Error> { let processor = self.clone(); let process_fn = move || { - let reprocess_tx = processor.reprocess_tx.clone(); processor.process_gossip_optimistic_update( message_id, peer_id, light_client_optimistic_update, - Some(reprocess_tx), + true, seen_timestamp, ) }; @@ -843,16 +792,19 @@ impl NetworkBeaconProcessor { block_root: Hash256, publish_blobs: bool, ) { - let custody_columns = self.network_globals.sampling_columns.clone(); + let custody_columns = self.network_globals.sampling_columns(); let self_cloned = self.clone(); let publish_fn = move |blobs_or_data_column| { if publish_blobs { match blobs_or_data_column { - BlobsOrDataColumns::Blobs(blobs) => { + EngineGetBlobsOutput::Blobs(blobs) => { self_cloned.publish_blobs_gradually(blobs, block_root); } - BlobsOrDataColumns::DataColumns(columns) => { - self_cloned.publish_data_columns_gradually(columns, block_root); + EngineGetBlobsOutput::CustodyColumns(columns) => { + self_cloned.publish_data_columns_gradually( + columns.into_iter().map(|c| 
c.clone_arc()).collect(), + block_root, + ); } }; } @@ -927,7 +879,12 @@ impl NetworkBeaconProcessor { publish_columns: bool, ) -> Option { // Only supernodes attempt reconstruction - if !self.network_globals.is_supernode() { + if !self + .chain + .data_availability_checker + .custody_context() + .current_is_supernode + { return None; } @@ -1139,16 +1096,13 @@ impl NetworkBeaconProcessor { #[cfg(test)] use { - beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend}, - beacon_processor::BeaconProcessorChannels, - slot_clock::ManualSlotClock, - store::MemoryStore, - tokio::sync::mpsc::UnboundedSender, + beacon_chain::builder::Witness, beacon_processor::BeaconProcessorChannels, + slot_clock::ManualSlotClock, store::MemoryStore, tokio::sync::mpsc::UnboundedSender, }; #[cfg(test)] pub(crate) type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; + Witness, MemoryStore>; #[cfg(test)] impl NetworkBeaconProcessor> { @@ -1165,8 +1119,6 @@ impl NetworkBeaconProcessor> { let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx: _work_reprocessing_rx, } = <_>::default(); let (network_tx, _network_rx) = mpsc::unbounded_channel(); @@ -1177,7 +1129,6 @@ impl NetworkBeaconProcessor> { chain, network_tx, sync_tx, - reprocess_tx: work_reprocessing_tx, network_globals, invalid_block_storage: InvalidBlockStorage::Disabled, executor, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 7c3c854ed8..4004305f83 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -70,14 +70,14 @@ impl NetworkBeaconProcessor { let local = self.chain.status_message(); let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); - let irrelevant_reason = if local.fork_digest != remote.fork_digest { + let 
irrelevant_reason = if local.fork_digest() != remote.fork_digest() { // The node is on a different network/fork Some(format!( "Incompatible forks Ours:{} Theirs:{}", - hex::encode(local.fork_digest), - hex::encode(remote.fork_digest) + hex::encode(local.fork_digest()), + hex::encode(remote.fork_digest()) )) - } else if remote.head_slot + } else if *remote.head_slot() > self .chain .slot() @@ -88,11 +88,11 @@ impl NetworkBeaconProcessor { // current slot. This could be because they are using a different genesis time, or that // their or our system's clock is incorrect. Some("Different system clocks or genesis time".to_string()) - } else if (remote.finalized_epoch == local.finalized_epoch - && remote.finalized_root == local.finalized_root) - || remote.finalized_root.is_zero() - || local.finalized_root.is_zero() - || remote.finalized_epoch > local.finalized_epoch + } else if (remote.finalized_epoch() == local.finalized_epoch() + && remote.finalized_root() == local.finalized_root()) + || remote.finalized_root().is_zero() + || local.finalized_root().is_zero() + || remote.finalized_epoch() > local.finalized_epoch() { // Fast path. Remote finalized checkpoint is either identical, or genesis, or we are at // genesis, or they are ahead. In all cases, we should allow this peer to connect to us @@ -100,7 +100,7 @@ impl NetworkBeaconProcessor { None } else { // Remote finalized epoch is less than ours. - let remote_finalized_slot = start_slot(remote.finalized_epoch); + let remote_finalized_slot = start_slot(*remote.finalized_epoch()); if remote_finalized_slot < self.chain.store.get_oldest_block_slot() { // Peer's finalized checkpoint is older than anything in our DB. We are unlikely // to be able to help them sync. 
@@ -112,7 +112,7 @@ impl NetworkBeaconProcessor { if self .chain .block_root_at_slot(remote_finalized_slot, WhenSlotSkipped::Prev) - .map(|root_opt| root_opt != Some(remote.finalized_root)) + .map(|root_opt| root_opt != Some(*remote.finalized_root())) .map_err(Box::new)? { Some("Different finalized chain".to_string()) @@ -138,10 +138,11 @@ impl NetworkBeaconProcessor { } Ok(None) => { let info = SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, + head_slot: *status.head_slot(), + head_root: *status.head_root(), + finalized_epoch: *status.finalized_epoch(), + finalized_root: *status.finalized_root(), + earliest_available_slot: status.earliest_available_slot().ok().cloned(), }; self.send_sync_message(SyncMessage::AddPeer(peer_id, info)); } @@ -944,12 +945,18 @@ impl NetworkBeaconProcessor { match self.chain.get_blobs(&root) { Ok(blob_sidecar_list) => { for blob_sidecar in blob_sidecar_list.iter() { - blobs_sent += 1; - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - inbound_request_id, - response: Response::BlobsByRange(Some(blob_sidecar.clone())), - }); + // Due to skip slots, blobs could be out of the range, we ensure they + // are in the range before sending + if blob_sidecar.slot() >= request_start_slot + && blob_sidecar.slot() < request_start_slot + req.count + { + blobs_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + inbound_request_id, + response: Response::BlobsByRange(Some(blob_sidecar.clone())), + }); + } } } Err(e) => { @@ -1057,14 +1064,20 @@ impl NetworkBeaconProcessor { for index in &req.columns { match self.chain.get_data_column(&root, index) { Ok(Some(data_column_sidecar)) => { - data_columns_sent += 1; - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - inbound_request_id, - response: Response::DataColumnsByRange(Some( - data_column_sidecar.clone(), - )), - }); + // Due 
to skip slots, data columns could be out of the range, we ensure they + // are in the range before sending + if data_column_sidecar.slot() >= request_start_slot + && data_column_sidecar.slot() < request_start_slot + req.count + { + data_columns_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + inbound_request_id, + response: Response::DataColumnsByRange(Some( + data_column_sidecar.clone(), + )), + }); + } } Ok(None) => {} // no-op Err(e) => { diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 31b17a41a4..cff6e26165 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -17,11 +17,11 @@ use beacon_processor::{ work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage}, AsyncFn, BlockingFn, DuplicateCache, }; +use beacon_processor::{Work, WorkEvent}; use lighthouse_network::PeerAction; use std::sync::Arc; use std::time::Duration; use store::KzgCommitment; -use tokio::sync::mpsc; use tracing::{debug, error, info, warn}; use types::beacon_block_body::format_kzg_commitments; use types::blob_sidecar::FixedBlobSidecarList; @@ -57,14 +57,12 @@ impl NetworkBeaconProcessor { process_type: BlockProcessType, ) -> AsyncFn { let process_fn = async move { - let reprocess_tx = self.reprocess_tx.clone(); let duplicate_cache = self.duplicate_cache.clone(); self.process_rpc_block( block_root, block, seen_timestamp, process_type, - reprocess_tx, duplicate_cache, ) .await; @@ -106,7 +104,6 @@ impl NetworkBeaconProcessor { block: RpcBlock, seen_timestamp: Duration, process_type: BlockProcessType, - reprocess_tx: mpsc::Sender, duplicate_cache: DuplicateCache, ) { // Check if the block is already being imported through another source @@ -131,7 +128,14 @@ impl NetworkBeaconProcessor { ignore_fn, }); - if reprocess_tx.try_send(reprocess_msg).is_err() { + if 
self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(reprocess_msg), + }) + .is_err() + { error!(source = "rpc", %block_root,"Failed to inform block import") }; return; @@ -176,7 +180,14 @@ impl NetworkBeaconProcessor { block_root: *hash, parent_root, }; - if reprocess_tx.try_send(reprocess_msg).is_err() { + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(reprocess_msg), + }) + .is_err() + { error!( source = "rpc", block_root = %hash, diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 292e894870..109c361ebe 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -9,13 +9,16 @@ use crate::{ sync::{manager::BlockProcessType, SyncMessage}, }; use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_column_verification::validate_data_column_sidecar_for_gossip; use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; +use beacon_chain::observed_data_sidecars::DoNotObserve; use beacon_chain::test_utils::{ get_kzg, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; +use gossipsub::MessageAcceptance; use itertools::Itertools; use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MetaDataV3}; use lighthouse_network::rpc::InboundRequestId; @@ -25,6 +28,7 @@ use lighthouse_network::{ types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, Client, MessageId, NetworkConfig, NetworkGlobals, PeerId, Response, }; +use matches::assert_matches; use slot_clock::SlotClock; use std::iter::Iterator; use std::sync::Arc; @@ -32,9 +36,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use 
types::{ - Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, DataColumnSidecarList, + AttesterSlashing, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecarList, DataColumnSubnetId, Epoch, Hash256, MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, + SignedBeaconBlock, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, }; type E = MainnetEthSpec; @@ -56,15 +60,15 @@ struct TestRig { next_block: Arc>, next_blobs: Option>, next_data_columns: Option>, - attestations: Vec<(Attestation, SubnetId)>, - next_block_attestations: Vec<(Attestation, SubnetId)>, + attestations: Vec<(SingleAttestation, SubnetId)>, + next_block_attestations: Vec<(SingleAttestation, SubnetId)>, next_block_aggregate_attestations: Vec>, attester_slashing: AttesterSlashing, proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, beacon_processor_tx: BeaconProcessorSend, work_journal_rx: mpsc::Receiver<&'static str>, - _network_rx: mpsc::UnboundedReceiver>, + network_rx: mpsc::UnboundedReceiver>, _sync_rx: mpsc::UnboundedReceiver>, duplicate_cache: DuplicateCache, network_beacon_processor: Arc>, @@ -83,19 +87,18 @@ impl Drop for TestRig { impl TestRig { pub async fn new(chain_length: u64) -> Self { - Self::new_parametric( - chain_length, - BeaconProcessorConfig::default().enable_backfill_rate_limiting, - ) - .await - } - - pub async fn new_parametric(chain_length: u64, enable_backfill_rate_limiting: bool) -> Self { // This allows for testing voluntary exits without building out a massive chain. 
let mut spec = test_spec::(); spec.shard_committee_period = 2; - let spec = Arc::new(spec); + Self::new_parametric(chain_length, BeaconProcessorConfig::default(), spec).await + } + pub async fn new_parametric( + chain_length: u64, + beacon_processor_config: BeaconProcessorConfig, + spec: ChainSpec, + ) -> Self { + let spec = Arc::new(spec); let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) @@ -126,13 +129,21 @@ impl TestRig { "precondition: current slot is one after head" ); + // Ensure there is a blob in the next block. Required for some tests. + harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .execution_block_generator() + .set_min_blob_count(1); let (next_block_tuple, next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; let head_state_root = head.beacon_state_root(); let attestations = harness - .get_unaggregated_attestations( + .get_single_attestations( &AttestationStrategy::AllValidators, &head.beacon_state, head_state_root, @@ -149,7 +160,7 @@ impl TestRig { ); let next_block_attestations = harness - .get_unaggregated_attestations( + .get_single_attestations( &AttestationStrategy::AllValidators, &next_state, next_block_tuple.0.state_root(), @@ -183,17 +194,11 @@ impl TestRig { let chain = harness.chain.clone(); - let (network_tx, _network_rx) = mpsc::unbounded_channel(); + let (network_tx, network_rx) = mpsc::unbounded_channel(); - let beacon_processor_config = BeaconProcessorConfig { - enable_backfill_rate_limiting, - ..Default::default() - }; let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx, } = BeaconProcessorChannels::new(&beacon_processor_config); let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); @@ -237,7 +242,6 @@ impl TestRig { chain: harness.chain.clone(), network_tx, sync_tx, - reprocess_tx: work_reprocessing_tx.clone(), network_globals: 
network_globals.clone(), invalid_block_storage: InvalidBlockStorage::Disabled, executor: executor.clone(), @@ -252,8 +256,6 @@ impl TestRig { } .spawn_manager( beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx, Some(work_journal_tx), harness.chain.slot_clock.clone(), chain.spec.maximum_gossip_clock_disparity(), @@ -278,7 +280,7 @@ impl TestRig { ) .unwrap() .into_iter() - .filter(|c| network_globals.sampling_columns.contains(&c.index)) + .filter(|c| network_globals.sampling_columns().contains(&c.index)) .collect::>(); (None, Some(custody_columns)) @@ -304,7 +306,7 @@ impl TestRig { voluntary_exit, beacon_processor_tx, work_journal_rx, - _network_rx, + network_rx, _sync_rx, duplicate_cache, network_beacon_processor, @@ -364,22 +366,12 @@ impl TestRig { } } - pub fn custody_columns_count(&self) -> usize { - self.network_beacon_processor - .network_globals - .custody_columns_count() as usize - } - pub fn enqueue_rpc_block(&self) { let block_root = self.next_block.canonical_root(); self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs( - Some(block_root), - self.next_block.clone(), - self.custody_columns_count(), - ), + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 0 }, ) @@ -391,11 +383,7 @@ impl TestRig { self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs( - Some(block_root), - self.next_block.clone(), - self.custody_columns_count(), - ), + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) @@ -643,6 +631,50 @@ impl TestRig { assert_eq!(events, expected); } + + /// Listen for network messages and collect them for a specified duration or until reaching a count. + /// + /// Returns None if no messages were received, or Some(Vec) containing the received messages. 
+ /// + /// # Arguments + /// + /// * `timeout` - Maximum duration to listen for messages + /// * `count` - Optional maximum number of messages to collect before returning + pub async fn receive_network_messages_with_timeout( + &mut self, + timeout: Duration, + count: Option, + ) -> Option>> { + let mut events = vec![]; + + let timeout_future = tokio::time::sleep(timeout); + tokio::pin!(timeout_future); + + loop { + // Break if we've received the requested count of messages + if let Some(target_count) = count { + if events.len() >= target_count { + break; + } + } + + tokio::select! { + _ = &mut timeout_future => break, + maybe_msg = self.network_rx.recv() => { + match maybe_msg { + Some(msg) => events.push(msg), + None => break, // Channel closed + } + } + } + } + + if events.is_empty() { + None + } else { + Some(events) + } + } } fn junk_peer_id() -> PeerId { @@ -692,6 +724,10 @@ async fn import_gossip_block_acceptably_early() { rig.assert_event_journal_completes(&[WorkType::GossipDataColumnSidecar]) .await; } + if num_data_columns > 0 { + rig.assert_event_journal_completes(&[WorkType::ColumnReconstruction]) + .await; + } // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for @@ -753,6 +789,60 @@ async fn import_gossip_block_unacceptably_early() { ); } +/// Data columns that have already been processed but unobserved should be propagated without re-importing. +#[tokio::test] +async fn accept_processed_gossip_data_columns_without_import() { + if test_spec::().fulu_fork_epoch.is_none() { + return; + }; + + let mut rig = TestRig::new(SMALL_CHAIN).await; + + // GIVEN the data columns have already been processed but unobserved. + // 1. verify data column with `DoNotObserve` to create verified but unobserved data columns. + // 2. put verified but unobserved data columns into the data availability cache. 
+ let verified_data_columns: Vec<_> = rig + .next_data_columns + .clone() + .unwrap() + .into_iter() + .map(|data_column| { + let subnet_id = data_column.index; + validate_data_column_sidecar_for_gossip::<_, DoNotObserve>( + data_column, + subnet_id, + &rig.chain, + ) + .expect("should be valid data column") + }) + .collect(); + + let block_root = rig.next_block.canonical_root(); + rig.chain + .data_availability_checker + .put_gossip_verified_data_columns(block_root, verified_data_columns) + .expect("should put data columns into availability cache"); + + // WHEN an already processed but unobserved data column is received via gossip + rig.enqueue_gossip_data_columns(0); + + // THEN the data column should be propagated without re-importing (not sure if there's an easy way to test this) + let network_message = rig + .receive_network_messages_with_timeout(Duration::from_millis(100), Some(1)) + .await + .and_then(|mut vec| vec.pop()) + .expect("should receive network messages"); + + assert_matches!( + network_message, + NetworkMessage::ValidationResult { + propagation_source: _, + message_id: _, + validation_result: MessageAcceptance::Accept, + } + ); +} + /// Blocks that arrive on-time should be processed normally. #[tokio::test] async fn import_gossip_block_at_current_slot() { @@ -1157,11 +1247,25 @@ async fn test_rpc_block_reprocessing() { tokio::time::sleep(QUEUED_RPC_BLOCK_DELAY).await; rig.assert_event_journal(&[WorkType::RpcBlock.into()]).await; - // Add an extra delay for block processing - tokio::time::sleep(Duration::from_millis(10)).await; - // head should update to next block now since the duplicate - // cache handle was dropped. - assert_eq!(next_block_root, rig.head_root()); + + let max_retries = 3; + let mut success = false; + for _ in 0..max_retries { + // Add an extra delay for block processing + tokio::time::sleep(Duration::from_millis(10)).await; + // head should update to the next block now since the duplicate + // cache handle was dropped. 
+ if next_block_root == rig.head_root() { + success = true; + break; + } + } + assert!( + success, + "expected head_root to be {:?} but was {:?}", + next_block_root, + rig.head_root() + ); } /// Ensure that backfill batches get rate-limited and processing is scheduled at specified intervals. @@ -1192,8 +1296,12 @@ async fn test_backfill_sync_processing() { /// Ensure that backfill batches get processed as fast as they can when rate-limiting is disabled. #[tokio::test] async fn test_backfill_sync_processing_rate_limiting_disabled() { - let enable_backfill_rate_limiting = false; - let mut rig = TestRig::new_parametric(SMALL_CHAIN, enable_backfill_rate_limiting).await; + let beacon_processor_config = BeaconProcessorConfig { + enable_backfill_rate_limiting: false, + ..Default::default() + }; + let mut rig = + TestRig::new_parametric(SMALL_CHAIN, beacon_processor_config, test_spec::()).await; for _ in 0..3 { rig.enqueue_backfill_batch(); @@ -1236,7 +1344,7 @@ async fn test_blobs_by_range() { .unwrap_or(0); } let mut actual_count = 0; - while let Some(next) = rig._network_rx.recv().await { + while let Some(next) = rig.network_rx.recv().await { if let NetworkMessage::SendResponse { peer_id: _, response: Response::BlobsByRange(blob), diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index 9c112dba86..938b08a315 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -86,5 +86,9 @@ mod tests { .unwrap(); let dht: PersistedDht = store.get_item(&DHT_DB_KEY).unwrap().unwrap(); assert_eq!(dht.enrs, enrs); + + // This hardcoded length check is for database schema compatibility. If the on-disk format + // of `PersistedDht` changes, we need a DB schema change. 
+ assert_eq!(dht.as_store_bytes().len(), 136); } } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 2a7bc597c2..5d5daae4ae 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -10,9 +10,7 @@ use crate::service::NetworkMessage; use crate::status::status_message; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use beacon_processor::{ - work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, -}; +use beacon_processor::{BeaconProcessorSend, DuplicateCache}; use futures::prelude::*; use lighthouse_network::rpc::*; use lighthouse_network::{ @@ -87,7 +85,6 @@ impl Router { executor: task_executor::TaskExecutor, invalid_block_storage: InvalidBlockStorage, beacon_processor_send: BeaconProcessorSend, - beacon_processor_reprocess_tx: mpsc::Sender, fork_context: Arc, ) -> Result>, String> { trace!("Service starting"); @@ -103,7 +100,6 @@ impl Router { chain: beacon_chain.clone(), network_tx: network_send.clone(), sync_tx: sync_send.clone(), - reprocess_tx: beacon_processor_reprocess_tx, network_globals: network_globals.clone(), invalid_block_storage, executor: executor.clone(), @@ -354,17 +350,6 @@ impl Router { timestamp_now(), ), ), - PubsubMessage::SingleAttestation(subnet_attestation) => self - .handle_beacon_processor_send_result( - self.network_beacon_processor.send_single_attestation( - message_id, - peer_id, - subnet_attestation.1, - subnet_attestation.0, - should_process, - timestamp_now(), - ), - ), PubsubMessage::BeaconBlock(block) => self.handle_beacon_processor_send_result( self.network_beacon_processor.send_gossip_beacon_block( message_id, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 77204b455d..0a6d515232 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -6,10 +6,11 @@ use crate::router::{Router, RouterMessage}; use 
crate::subnet_service::{SubnetService, SubnetServiceMessage, Subscription}; use crate::NetworkConfig; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend}; +use beacon_processor::BeaconProcessorSend; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; + use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::RequestType; use lighthouse_network::service::Network; @@ -105,6 +106,12 @@ pub enum NetworkMessage { ConnectTrustedPeer(Enr), /// Disconnect from a trusted peer and remove it from the `trusted_peers` mapping. DisconnectTrustedPeer(Enr), + /// Custody group count changed due to a change in validators' weight. + /// Subscribe to new subnets and update ENR metadata. + CustodyCountChanged { + new_custody_group_count: u64, + sampling_count: u64, + }, } /// Messages triggered by validators that may trigger a subscription to a subnet. @@ -204,7 +211,6 @@ impl NetworkService { executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, - beacon_processor_reprocess_tx: mpsc::Sender, ) -> Result< ( NetworkService, @@ -270,7 +276,15 @@ impl NetworkService { }; // launch libp2p service - let (mut libp2p, network_globals) = Network::new(executor.clone(), service_context).await?; + let (mut libp2p, network_globals) = Network::new( + executor.clone(), + service_context, + beacon_chain + .data_availability_checker + .custody_context() + .custody_group_count_at_head(&beacon_chain.spec), + ) + .await?; // Repopulate the DHT with stored ENR's if discovery is not disabled. 
if !config.disable_discovery { @@ -300,7 +314,6 @@ impl NetworkService { executor.clone(), invalid_block_storage, beacon_processor_send, - beacon_processor_reprocess_tx, fork_context.clone(), )?; @@ -352,7 +365,6 @@ impl NetworkService { executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, - beacon_processor_reprocess_tx: mpsc::Sender, ) -> Result<(Arc>, NetworkSenders), String> { let (network_service, network_globals, network_senders) = Self::build( beacon_chain, @@ -360,7 +372,6 @@ impl NetworkService { executor.clone(), libp2p_registry, beacon_processor_send, - beacon_processor_reprocess_tx, ) .await?; @@ -539,23 +550,7 @@ impl NetworkService { // the attestation, else we just just propagate the Attestation. let should_process = self.subnet_service.should_process_attestation( Subnet::Attestation(subnet_id), - attestation.data(), - ); - self.send_to_router(RouterMessage::PubsubMessage( - id, - source, - message, - should_process, - )); - } - PubsubMessage::SingleAttestation(ref subnet_and_attestation) => { - let subnet_id = subnet_and_attestation.0; - let single_attestation = &subnet_and_attestation.1; - // checks if we have an aggregator for the slot. If so, we should process - // the attestation, else we just just propagate the Attestation. 
- let should_process = self.subnet_service.should_process_attestation( - Subnet::Attestation(subnet_id), - &single_attestation.data, + &attestation.data, ); self.send_to_router(RouterMessage::PubsubMessage( id, @@ -745,6 +740,22 @@ impl NetworkService { ); } } + NetworkMessage::CustodyCountChanged { + new_custody_group_count, + sampling_count, + } => { + // subscribe to `sampling_count` subnets + self.libp2p + .subscribe_new_data_column_subnets(sampling_count); + if self + .network_globals + .config + .advertise_false_custody_group_count + .is_none() + { + self.libp2p.update_enr_cgc(new_custody_group_count); + } + } } } diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 15c3321e94..db34211747 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -58,8 +58,6 @@ fn test_dht_persistence() { let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx: _beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx: _work_reprocessing_rx, } = <_>::default(); let _network_service = NetworkService::start( @@ -68,7 +66,6 @@ fn test_dht_persistence() { executor, None, beacon_processor_tx, - work_reprocessing_tx, ) .await .unwrap(); @@ -137,7 +134,6 @@ fn test_removing_topic_weight_on_old_topics() { executor.clone(), None, beacon_processor_channels.beacon_processor_tx, - beacon_processor_channels.work_reprocessing_tx, ) .await .unwrap() diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 1210926d34..be0d7c063b 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,7 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use types::{EthSpec, FixedBytesExtended, Hash256}; -use lighthouse_network::rpc::StatusMessage; +use lighthouse_network::rpc::{methods::StatusMessageV2, StatusMessage}; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. 
/// /// NOTE: The purpose of this is simply to obtain a `StatusMessage` from the `BeaconChain` without @@ -29,11 +29,14 @@ pub(crate) fn status_message(beacon_chain: &BeaconChain) finalized_checkpoint.root = Hash256::zero(); } - StatusMessage { + let earliest_available_slot = beacon_chain.store.get_anchor_info().oldest_block_slot; + + StatusMessage::V2(StatusMessageV2 { fork_digest, finalized_root: finalized_checkpoint.root, finalized_epoch: finalized_checkpoint.epoch, head_root: cached_head.head_block_root(), head_slot: cached_head.head_slot(), - } + earliest_available_slot, + }) } diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index dd4724b261..0da27c6a21 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -600,7 +600,7 @@ impl Stream for AttestationService { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // Update the waker if needed. if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { + if !waker.will_wake(cx.waker()) { self.waker = Some(cx.waker().clone()); } } else { diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index 5340538e52..a8ea6ed518 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -114,7 +114,6 @@ impl SubnetService { /// Establish the service based on the passed configuration. #[instrument(parent = None, - level = "info", fields(service = "subnet_service"), name = "subnet_service", skip_all @@ -229,7 +228,6 @@ impl SubnetService { /// This returns a result simply for the ergonomics of using ?. The result can be /// safely dropped. 
#[instrument(parent = None, - level = "info", fields(service = "subnet_service"), name = "subnet_service", skip_all @@ -369,7 +367,6 @@ impl SubnetService { /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip /// verification, re-propagates and returns false. #[instrument(parent = None, - level = "info", fields(service = "subnet_service"), name = "subnet_service", skip_all @@ -399,7 +396,6 @@ impl SubnetService { /// Adds an event to the event queue and notifies that this service is ready to be polled /// again. #[instrument(parent = None, - level = "info", fields(service = "subnet_service"), name = "subnet_service", skip_all @@ -416,7 +412,6 @@ impl SubnetService { /// If there is sufficient time, queues a peer discovery request for all the required subnets. // NOTE: Sending early subscriptions results in early searching for peers on subnets. #[instrument(parent = None, - level = "info", name = "subnet_service", skip_all )] @@ -468,7 +463,6 @@ impl SubnetService { // Subscribes to the subnet if it should be done immediately, or schedules it if required. #[instrument(parent = None, - level = "info", fields(service = "subnet_service"), name = "subnet_service", skip_all @@ -526,7 +520,6 @@ impl SubnetService { /// Adds a subscription event to the sync subnet. #[instrument(parent = None, - level = "info", fields(service = "subnet_service"), name = "subnet_service", skip_all @@ -581,7 +574,6 @@ impl SubnetService { /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send /// out the appropriate events. #[instrument(parent = None, - level = "info", fields(service = "subnet_service"), name = "subnet_service", skip_all @@ -642,7 +634,6 @@ impl SubnetService { // Unsubscribes from a subnet that was removed. 
#[instrument(parent = None, - level = "info", fields(service = "subnet_service"), name = "subnet_service", skip_all @@ -665,7 +656,6 @@ impl Stream for SubnetService { type Item = SubnetServiceMessage; #[instrument(parent = None, - level = "info", fields(service = "subnet_service"), name = "subnet_service", skip_all @@ -673,7 +663,7 @@ impl Stream for SubnetService { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // Update the waker if needed. if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { + if !waker.will_wake(cx.waker()) { self.waker = Some(cx.waker().clone()); } } else { diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs index 59ec278a95..6b3834e195 100644 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ b/beacon_node/network/src/subnet_service/sync_subnets.rs @@ -319,7 +319,7 @@ impl Stream for SyncCommitteeService { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // update the waker if needed if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { + if !waker.will_wake(cx.waker()) { self.waker = Some(cx.waker().clone()); } } else { diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 7fdf9047fc..86d1be08ec 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -1,7 +1,6 @@ use super::*; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, - eth1_chain::CachingEth1Backend, test_utils::get_kzg, BeaconChain, }; @@ -27,7 +26,6 @@ const TEST_LOG_LEVEL: Option<&str> = None; type TestBeaconChainType = Witness< SystemTimeSlotClock, - CachingEth1Backend, MainnetEthSpec, MemoryStore, MemoryStore, @@ -70,8 +68,6 @@ impl TestBeaconChain { .expect("should generate interop state"), ) .expect("should build state using recent genesis") - 
.dummy_eth1_backend() - .expect("should build dummy backend") .slot_clock(SystemTimeSlotClock::new( Slot::new(0), Duration::from_secs(recent_genesis_time()), diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index fcef06271f..ba66e41aca 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -148,7 +148,6 @@ pub struct BackFillSync { impl BackFillSync { #[instrument(parent = None, - level = "info", name = "backfill_sync", skip_all )] @@ -193,7 +192,6 @@ impl BackFillSync { /// Pauses the backfill sync if it's currently syncing. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -210,7 +208,6 @@ impl BackFillSync { /// If resuming is successful, reports back the current syncing metrics. #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -291,7 +288,6 @@ impl BackFillSync { /// If we are in a failed state, update a local variable to indicate we are able to restart /// the failed sync on the next attempt. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -305,7 +301,6 @@ impl BackFillSync { /// A peer has disconnected. /// If the peer has active batches, those are considered failed and re-requested. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -325,7 +320,6 @@ impl BackFillSync { /// /// If the batch exists it is re-requested. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -368,7 +362,6 @@ impl BackFillSync { /// join the system. 
/// The sync manager should update the global sync state on failure. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -425,7 +418,6 @@ impl BackFillSync { /// /// This resets past variables, to allow for a fresh start when resuming. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -462,7 +454,6 @@ impl BackFillSync { /// Processes the batch with the given id. /// The batch must exist and be ready for processing #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -526,7 +517,6 @@ impl BackFillSync { /// of the batch processor. /// If an error is returned the BackFill sync has failed. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -684,7 +674,6 @@ impl BackFillSync { /// Processes the next ready batch. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -753,7 +742,6 @@ impl BackFillSync { /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -850,7 +838,6 @@ impl BackFillSync { /// have received are incorrect or invalid. This indicates the peer has not performed as /// intended and can result in downvoting a peer. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -907,7 +894,6 @@ impl BackFillSync { /// Requests the batch assigned to the given id from a given peer. 
#[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -984,7 +970,6 @@ impl BackFillSync { /// When resuming a chain, this function searches for batches that need to be re-downloaded and /// transitions their state to redownload the batch. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -1014,7 +999,6 @@ impl BackFillSync { /// Attempts to request the next required batches from the peer pool if the chain is syncing. It will exhaust the peer /// pool and left over batches until the batch buffer is reached or all peers are exhausted. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -1044,7 +1028,6 @@ impl BackFillSync { /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -1111,7 +1094,6 @@ impl BackFillSync { /// This errors if the beacon chain indicates that backfill sync has already completed or is /// not required. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -1130,7 +1112,6 @@ impl BackFillSync { /// Checks with the beacon chain if backfill sync has completed. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -1151,7 +1132,6 @@ impl BackFillSync { /// Checks if backfill would complete by syncing to `start_epoch`. #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -1166,7 +1146,6 @@ impl BackFillSync { /// Updates the global network state indicating the current state of a backfill sync. 
#[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -1176,7 +1155,6 @@ impl BackFillSync { } #[instrument(parent = None, - level = "info", fields(service = "backfill_sync"), name = "backfill_sync", skip_all @@ -1243,6 +1221,7 @@ mod tests { head_root: Hash256::random(), finalized_epoch, finalized_root: Hash256::random(), + earliest_available_slot: None, }, }, ); diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 8c884f644e..c8bd8c170f 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -106,7 +106,7 @@ pub type SingleLookupId = u32; enum Action { Retry, ParentUnknown { parent_root: Hash256 }, - Drop, + Drop(/* reason: */ String), Continue, } @@ -127,7 +127,7 @@ use lighthouse_network::service::api_types::Id; pub(crate) type BlockLookupSummary = (Id, Hash256, Option, Vec); impl BlockLookups { - #[instrument(parent = None,level = "info", fields(service = "lookup_sync"), name = "lookup_sync")] + #[instrument(parent = None, fields(service = "lookup_sync"), name = "lookup_sync")] pub fn new() -> Self { Self { failed_chains: LRUTimeCache::new(Duration::from_secs( @@ -139,7 +139,6 @@ impl BlockLookups { #[cfg(test)] #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -150,7 +149,6 @@ impl BlockLookups { #[cfg(test)] #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -161,7 +159,6 @@ impl BlockLookups { #[cfg(test)] #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -175,7 +172,6 @@ impl BlockLookups { /// Returns a vec of all parent lookup chains by tip, in descending slot order (tip first) #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", 
skip_all @@ -194,19 +190,21 @@ impl BlockLookups { /// Creates a parent lookup for the block with the given `block_root` and immediately triggers it. /// If a parent lookup exists or is triggered, a current lookup will be created. + /// + /// Returns true if the lookup is created or already exists #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all )] + #[must_use = "only reference the new lookup if returns true"] pub fn search_child_and_parent( &mut self, block_root: Hash256, block_component: BlockComponent, peer_id: PeerId, cx: &mut SyncNetworkContext, - ) { + ) -> bool { let parent_root = block_component.parent_root(); let parent_lookup_exists = @@ -223,25 +221,28 @@ impl BlockLookups { // the lookup with zero peers to house the block components. &[], cx, - ); + ) + } else { + false } } /// Seach a block whose parent root is unknown. + /// /// Returns true if the lookup is created or already exists #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all )] + #[must_use = "only reference the new lookup if returns true"] pub fn search_unknown_block( &mut self, block_root: Hash256, peer_source: &[PeerId], cx: &mut SyncNetworkContext, - ) { - self.new_current_lookup(block_root, None, None, peer_source, cx); + ) -> bool { + self.new_current_lookup(block_root, None, None, peer_source, cx) } /// A block or blob triggers the search of a parent. @@ -251,11 +252,11 @@ impl BlockLookups { /// /// Returns true if the lookup is created or already exists #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all )] + #[must_use = "only reference the new lookup if returns true"] pub fn search_parent_of_child( &mut self, block_root_to_search: Hash256, @@ -358,11 +359,11 @@ impl BlockLookups { /// constructed. 
/// Returns true if the lookup is created or already exists #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all )] + #[must_use = "only reference the new lookup if returns true"] fn new_current_lookup( &mut self, block_root: Hash256, @@ -466,7 +467,6 @@ impl BlockLookups { /// Process a block or blob response received from a single lookup request. #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -557,7 +557,6 @@ impl BlockLookups { /* Error responses */ #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -571,7 +570,6 @@ impl BlockLookups { /* Processing responses */ #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -597,7 +595,6 @@ impl BlockLookups { } #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -656,7 +653,7 @@ impl BlockLookups { // This is unreachable because RPC blocks do not undergo gossip verification, and // this error can *only* come from gossip verification. error!(?block_root, "Single block lookup hit unreachable condition"); - Action::Drop + Action::Drop("DuplicateImportStatusUnknown".to_owned()) } BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. @@ -665,14 +662,14 @@ impl BlockLookups { component = ?R::response_type(), "Lookup component processing ignored, cpu might be overloaded" ); - Action::Drop + Action::Drop("Block processing ignored".to_owned()) } BlockProcessingResult::Err(e) => { match e { BlockError::BeaconChainError(e) => { // Internal error error!(%block_root, error = ?e, "Beacon chain error processing lookup component"); - Action::Drop + Action::Drop(format!("{e:?}")) } BlockError::ParentUnknown { parent_root, .. 
} => { // Reverts the status of this request to `AwaitingProcessing` holding the @@ -691,7 +688,7 @@ impl BlockLookups { error = ?e, "Single block lookup failed. Execution layer is offline / unsynced / misconfigured" ); - Action::Drop + Action::Drop(format!("{e:?}")) } BlockError::AvailabilityCheck(e) if e.category() == AvailabilityCheckErrorCategory::Internal => @@ -703,7 +700,7 @@ impl BlockLookups { // lookup state transition. This error invalidates both blob and block requests, and we don't know the // state of both requests. Blobs may have already successfullly processed for example. // We opt to drop the lookup instead. - Action::Drop + Action::Drop(format!("{e:?}")) } other => { debug!( @@ -757,19 +754,32 @@ impl BlockLookups { } Action::ParentUnknown { parent_root } => { let peers = lookup.all_peers(); + // Mark lookup as awaiting **before** creating the parent lookup. At this point the + // lookup maybe inconsistent. lookup.set_awaiting_parent(parent_root); - debug!( - id = lookup.id, - ?block_root, - ?parent_root, - "Marking lookup as awaiting parent" - ); - self.search_parent_of_child(parent_root, block_root, &peers, cx); - Ok(LookupResult::Pending) + let parent_lookup_exists = + self.search_parent_of_child(parent_root, block_root, &peers, cx); + if parent_lookup_exists { + // The parent lookup exist or has been created. It's safe for `lookup` to + // reference the parent as awaiting. + debug!( + id = lookup_id, + ?block_root, + ?parent_root, + "Marking lookup as awaiting parent" + ); + Ok(LookupResult::Pending) + } else { + // The parent lookup is faulty and was not created, we must drop the `lookup` as + // it's in an inconsistent state. We must drop all of its children too. 
+ Err(LookupRequestError::Failed(format!( + "Parent lookup is faulty {parent_root:?}" + ))) + } } - Action::Drop => { + Action::Drop(reason) => { // Drop with noop - Err(LookupRequestError::Failed) + Err(LookupRequestError::Failed(reason)) } Action::Continue => { // Drop this completed lookup only @@ -779,7 +789,6 @@ impl BlockLookups { } #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -810,7 +819,6 @@ impl BlockLookups { /// Makes progress on the immediate children of `block_root` #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -841,7 +849,6 @@ impl BlockLookups { /// the parent to make progress to resolve, therefore we must drop them if the parent is /// dropped. #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -871,7 +878,6 @@ impl BlockLookups { /// Common handler a lookup request error, drop it and update metrics /// Returns true if the lookup is created or already exists #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -914,7 +920,6 @@ impl BlockLookups { /// Drops all the single block requests and returns how many requests were dropped. #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -926,7 +931,6 @@ impl BlockLookups { } #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -940,7 +944,6 @@ impl BlockLookups { /// Perform some prune operations on lookups on some interval #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -969,7 +972,6 @@ impl BlockLookups { /// Instead there's no negative for keeping lookups with no peers around for some time. 
If we /// regularly prune them, it should not be a memory concern (TODO: maybe yes!). #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -1012,7 +1014,6 @@ impl BlockLookups { /// - One single clear warn level log per stuck incident /// - If the original bug is sporadic, it reduces the time a node is stuck from forever to 15 min #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -1055,7 +1056,6 @@ impl BlockLookups { /// Recursively find the oldest ancestor lookup of another lookup #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all @@ -1085,7 +1085,6 @@ impl BlockLookups { /// Note: Takes a `lookup_id` as argument to allow recursion on mutable lookups, without having /// to duplicate the code to add peers to a lookup #[instrument(parent = None, - level = "info", fields(service = "lookup_sync"), name = "lookup_sync", skip_all diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 3789dbe91e..30947cf1f0 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -40,7 +40,7 @@ pub enum LookupRequestError { /// Inconsistent lookup request state BadState(String), /// Lookup failed for some other reason and should be dropped - Failed, + Failed(/* reason: */ String), /// Received MissingComponents when all components have been processed. 
This should never /// happen, and indicates some internal bug MissingComponentsAfterAllProcessed, diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 99428b0c80..0418ab4553 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -257,17 +257,11 @@ impl RangeBlockComponentsRequest { )); } - RpcBlock::new_with_custody_columns( - Some(block_root), - block, - custody_columns, - expects_custody_columns.len(), - spec, - ) - .map_err(|e| format!("{e:?}"))? + RpcBlock::new_with_custody_columns(Some(block_root), block, custody_columns, spec) + .map_err(|e| format!("{e:?}"))? } else { // Block has no data, expects zero columns - RpcBlock::new_without_blobs(Some(block_root), block, 0) + RpcBlock::new_without_blobs(Some(block_root), block) }); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 473881f182..d11a18ed0a 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -398,10 +398,11 @@ impl SyncManager { // ensure the beacon chain still exists let status = self.chain.status_message(); let local = SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, + head_slot: *status.head_slot(), + head_root: *status.head_root(), + finalized_epoch: *status.finalized_epoch(), + finalized_root: *status.finalized_root(), + earliest_available_slot: status.earliest_available_slot().ok().cloned(), }; let sync_type = remote_sync_type(&local, &remote, &self.chain); @@ -450,10 +451,11 @@ impl SyncManager { ) { let status = self.chain.status_message(); let local = SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, + head_slot: 
*status.head_slot(), + head_root: *status.head_root(), + finalized_epoch: *status.finalized_epoch(), + finalized_root: *status.finalized_root(), + earliest_available_slot: status.earliest_available_slot().ok().cloned(), }; let head_slot = head_slot.unwrap_or_else(|| { @@ -471,6 +473,7 @@ impl SyncManager { // Set finalized to same as local to trigger Head sync finalized_epoch: local.finalized_epoch, finalized_root: local.finalized_root, + earliest_available_slot: local.earliest_available_slot, }; for peer_id in peers { @@ -929,12 +932,20 @@ impl SyncManager { ) { match self.should_search_for_block(Some(slot), &peer_id) { Ok(_) => { - self.block_lookups.search_child_and_parent( + if self.block_lookups.search_child_and_parent( block_root, block_component, peer_id, &mut self.network, - ); + ) { + // Lookup created. No need to log here it's logged in `new_current_lookup` + } else { + debug!( + ?block_root, + ?parent_root, + "No lookup created for child and parent" + ); + } } Err(reason) => { debug!(%block_root, %parent_root, reason, "Ignoring unknown parent request"); @@ -945,8 +956,15 @@ impl SyncManager { fn handle_unknown_block_root(&mut self, peer_id: PeerId, block_root: Hash256) { match self.should_search_for_block(None, &peer_id) { Ok(_) => { - self.block_lookups - .search_unknown_block(block_root, &[peer_id], &mut self.network); + if self.block_lookups.search_unknown_block( + block_root, + &[peer_id], + &mut self.network, + ) { + // Lookup created. 
No need to log here it's logged in `new_current_lookup` + } else { + debug!(?block_root, "No lookup created for unknown block"); + } } Err(reason) => { debug!(%block_root, reason, "Ignoring unknown block request"); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 58641f8606..d0e62e4ada 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -384,11 +384,12 @@ impl SyncNetworkContext { for peer_id in peers { debug!( peer = %peer_id, - fork_digest = ?status_message.fork_digest, - finalized_root = ?status_message.finalized_root, - finalized_epoch = ?status_message.finalized_epoch, - head_root = %status_message.head_root, - head_slot = %status_message.head_slot, + fork_digest = ?status_message.fork_digest(), + finalized_root = ?status_message.finalized_root(), + finalized_epoch = ?status_message.finalized_epoch(), + head_root = %status_message.head_root(), + head_slot = %status_message.head_slot(), + earliest_available_slot = ?status_message.earliest_available_slot(), "Sending Status Request" ); @@ -476,7 +477,7 @@ impl SyncNetworkContext { // Attempt to find all required custody peers before sending any request or creating an ID let columns_by_range_peers_to_request = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { - let column_indexes = self.network_globals().sampling_columns.clone(); + let column_indexes = self.network_globals().sampling_columns(); Some(self.select_columns_by_range_peers_to_request( &column_indexes, peers, @@ -534,7 +535,7 @@ impl SyncNetworkContext { ( data_column_requests, self.network_globals() - .sampling_columns + .sampling_columns() .clone() .iter() .copied() @@ -928,8 +929,7 @@ impl SyncNetworkContext { // Include only the blob indexes not yet imported (received through gossip) let custody_indexes_to_fetch = self .network_globals() - .sampling_columns - .clone() + .sampling_columns() .into_iter() 
.filter(|index| !custody_indexes_imported.contains(index)) .collect::>(); @@ -1487,11 +1487,7 @@ impl SyncNetworkContext { .beacon_processor_if_enabled() .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; - let block = RpcBlock::new_without_blobs( - Some(block_root), - block, - self.network_globals().custody_columns_count() as usize, - ); + let block = RpcBlock::new_without_blobs(Some(block_root), block); debug!(block = ?block_root, id, "Sending block for processing"); // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync diff --git a/beacon_node/network/src/sync/peer_sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs index 59b751787e..4ad77176aa 100644 --- a/beacon_node/network/src/sync/peer_sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -29,7 +29,7 @@ pub struct Sampling { } impl Sampling { - #[instrument(parent = None,level = "info", fields(service = "sampling"), name = "sampling")] + #[instrument(parent = None, fields(service = "sampling"), name = "sampling")] pub fn new(sampling_config: SamplingConfig) -> Self { Self { requests: <_>::default(), @@ -39,7 +39,6 @@ impl Sampling { #[cfg(test)] #[instrument(parent = None, - level = "info", fields(service = "sampling"), name = "sampling", skip_all @@ -50,7 +49,6 @@ impl Sampling { #[cfg(test)] #[instrument(parent = None, - level = "info", fields(service = "sampling"), name = "sampling", skip_all @@ -73,7 +71,6 @@ impl Sampling { /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. /// - `None`: Request still active, requester should do no action #[instrument(parent = None, - level = "info", fields(service = "sampling"), name = "sampling", skip_all @@ -124,7 +121,6 @@ impl Sampling { /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. 
/// - `None`: Request still active, requester should do no action #[instrument(parent = None, - level = "info", fields(service = "sampling"), name = "sampling", skip_all @@ -154,7 +150,6 @@ impl Sampling { /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. /// - `None`: Request still active, requester should do no action #[instrument(parent = None, - level = "info", fields(service = "sampling"), name = "sampling", skip_all @@ -179,7 +174,6 @@ impl Sampling { /// conveniently), to an Option first format to use an `if let Some() { act on result }` pattern /// in the sync manager. #[instrument(parent = None, - level = "info", fields(service = "sampling"), name = "sampling", skip_all diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index be01734417..cc49c43711 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -153,25 +153,25 @@ impl SyncingChain { } /// Check if the chain has peers from which to process batches. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None,fields(chain = self.id , service = "range_sync"), skip_all)] pub fn available_peers(&self) -> usize { self.peers.len() } /// Get the chain's id. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn id(&self) -> ChainId { self.id } /// Peers currently syncing this chain. 
- #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn peers(&self) -> impl Iterator + '_ { self.peers.iter().cloned() } /// Progress in epochs made by the chain - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn processed_epochs(&self) -> u64 { self.processing_target .saturating_sub(self.start_epoch) @@ -179,7 +179,7 @@ impl SyncingChain { } /// Returns the total count of pending blocks in all the batches of this chain - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn pending_blocks(&self) -> usize { self.batches .values() @@ -189,7 +189,7 @@ impl SyncingChain { /// Removes a peer from the chain. /// If the peer has active batches, those are considered failed and re-requested. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn remove_peer(&mut self, peer_id: &PeerId) -> ProcessingResult { self.peers.remove(peer_id); @@ -201,7 +201,7 @@ impl SyncingChain { } /// Returns the latest slot number that has been processed. 
- #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn current_processed_slot(&self) -> Slot { // the last slot we processed was included in the previous batch, and corresponds to the // first slot of the current target epoch @@ -211,7 +211,7 @@ impl SyncingChain { /// A block has been received for a batch on this chain. /// If the block correctly completes the batch it will be processed if possible. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn on_block_response( &mut self, network: &mut SyncNetworkContext, @@ -258,7 +258,7 @@ impl SyncingChain { /// Processes the batch with the given id. /// The batch must exist and be ready for processing - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn process_batch( &mut self, network: &mut SyncNetworkContext, @@ -306,7 +306,7 @@ impl SyncingChain { } /// Processes the next ready batch, prioritizing optimistic batches over the processing target. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn process_completed_batches( &mut self, network: &mut SyncNetworkContext, @@ -416,7 +416,7 @@ impl SyncingChain { /// The block processor has completed processing a batch. This function handles the result /// of the batch processor. 
- #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn on_batch_process_result( &mut self, network: &mut SyncNetworkContext, @@ -571,7 +571,7 @@ impl SyncingChain { } } - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn reject_optimistic_batch( &mut self, network: &mut SyncNetworkContext, @@ -606,7 +606,7 @@ impl SyncingChain { /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. #[allow(clippy::modulo_one)] - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn advance_chain(&mut self, network: &mut SyncNetworkContext, validating_epoch: Epoch) { // make sure this epoch produces an advancement if validating_epoch <= self.start_epoch { @@ -710,7 +710,7 @@ impl SyncingChain { /// These events occur when a peer has successfully responded with blocks, but the blocks we /// have received are incorrect or invalid. This indicates the peer has not performed as /// intended and can result in downvoting a peer. - #[instrument(parent = None,level = "info", fields(service = self.id, network), skip_all)] + #[instrument(parent = None, fields(service = self.id, network), skip_all)] fn handle_invalid_batch( &mut self, network: &mut SyncNetworkContext, @@ -770,7 +770,7 @@ impl SyncingChain { /// This chain has been requested to start syncing. /// /// This could be new chain, or an old chain that is being resumed. 
- #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn start_syncing( &mut self, network: &mut SyncNetworkContext, @@ -809,7 +809,7 @@ impl SyncingChain { /// Add a peer to the chain. /// /// If the chain is active, this starts requesting batches from this peer. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn add_peer( &mut self, network: &mut SyncNetworkContext, @@ -822,7 +822,7 @@ impl SyncingChain { /// An RPC error has occurred. /// /// If the batch exists it is re-requested. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn inject_error( &mut self, network: &mut SyncNetworkContext, @@ -880,7 +880,7 @@ impl SyncingChain { } /// Requests the batch assigned to the given id from a given peer. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn send_batch( &mut self, network: &mut SyncNetworkContext, @@ -959,7 +959,7 @@ impl SyncingChain { } /// Returns true if this chain is currently syncing. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn is_syncing(&self) -> bool { match self.state { ChainSyncingState::Syncing => true, @@ -969,7 +969,7 @@ impl SyncingChain { /// Kickstarts the chain by sending for processing batches that are ready and requesting more /// batches if needed. 
- #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] pub fn resume( &mut self, network: &mut SyncNetworkContext, @@ -982,7 +982,7 @@ impl SyncingChain { /// Attempts to request the next required batches from the peer pool if the chain is syncing. It will exhaust the peer /// pool and left over batches until the batch buffer is reached or all peers are exhausted. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn request_batches(&mut self, network: &mut SyncNetworkContext) -> ProcessingResult { if !matches!(self.state, ChainSyncingState::Syncing) { return Ok(KeepChain); @@ -1032,7 +1032,7 @@ impl SyncingChain { // Require peers on all sampling column subnets before sending batches let peers_on_all_custody_subnets = network .network_globals() - .sampling_subnets + .sampling_subnets() .iter() .all(|subnet_id| { let peer_count = network @@ -1052,7 +1052,7 @@ impl SyncingChain { /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. - #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { // don't request batches beyond the target head slot if self @@ -1115,7 +1115,7 @@ impl SyncingChain { /// This produces a string of the form: [D,E,E,E,E] /// to indicate the current buffer state of the chain. The symbols are defined on each of the /// batch states. See [BatchState::visualize] for symbol definitions. 
- #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] + #[instrument(parent = None, fields(chain = self.id , service = "range_sync"), skip_all)] fn visualize_batch_state(&self) -> String { let mut visualization_string = String::with_capacity((BATCH_BUFFER_SIZE * 3) as usize); diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 1ec1440991..f34816d1de 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -82,7 +82,6 @@ where T: BeaconChainTypes, { #[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all @@ -104,7 +103,6 @@ where } #[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all @@ -119,7 +117,6 @@ where /// chain, this may result in a different finalized chain from syncing as finalized chains are /// prioritised by peer-pool size. #[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all @@ -219,7 +216,6 @@ where /// This function finds the chain that made this request. Once found, processes the result. /// This request could complete a chain or simply add to its progress. #[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all @@ -255,7 +251,6 @@ where } #[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all @@ -293,7 +288,6 @@ where /// A peer has disconnected. This removes the peer from any ongoing chains and mappings. A /// disconnected peer could remove a chain #[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all @@ -311,7 +305,6 @@ where /// for this peer. If so we mark the batch as failed. The batch may then hit it's maximum /// retries. 
In this case, we need to remove the chain. #[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all @@ -335,7 +328,6 @@ where /// Check to see if the request corresponds to a pending batch. If so, re-request it if possible, if there have /// been too many failed attempts for the batch, remove the chain. #[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all @@ -371,7 +363,6 @@ where } #[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all @@ -411,10 +402,11 @@ where let status = self.beacon_chain.status_message(); let local = SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, + head_slot: *status.head_slot(), + head_root: *status.head_root(), + finalized_epoch: *status.finalized_epoch(), + finalized_root: *status.finalized_root(), + earliest_available_slot: status.earliest_available_slot().ok().cloned(), }; // update the state of the collection @@ -424,7 +416,6 @@ where /// Kickstarts sync. 
#[instrument(parent = None, - level = "info", fields(component = "range_sync"), name = "range_sync", skip_all diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 38095ec434..a2c359c87e 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -14,6 +14,7 @@ use std::time::Duration; use super::*; use crate::sync::block_lookups::common::ResponseType; +use beacon_chain::observed_data_sidecars::Observe; use beacon_chain::{ blob_verification::GossipVerifiedBlob, block_verification_types::{AsBlock, BlockImportData}, @@ -1204,12 +1205,8 @@ impl TestRig { payload_verification_status: PayloadVerificationStatus::Verified, is_valid_merge_transition_block: false, }; - let executed_block = AvailabilityPendingExecutedBlock::new( - block, - import_data, - payload_verification_outcome, - self.network_globals.custody_columns_count() as usize, - ); + let executed_block = + AvailabilityPendingExecutedBlock::new(block, import_data, payload_verification_outcome); match self .harness .chain @@ -1229,7 +1226,12 @@ impl TestRig { .harness .chain .data_availability_checker - .put_gossip_blob(GossipVerifiedBlob::__assumed_valid(blob.into())) + .put_gossip_verified_blobs( + blob.block_root(), + std::iter::once(GossipVerifiedBlob::<_, Observe>::__assumed_valid( + blob.into(), + )), + ) .unwrap() { Availability::Available(_) => panic!("blob removed from da_checker, available"), @@ -1719,6 +1721,63 @@ fn test_parent_lookup_too_deep_grow_ancestor() { rig.assert_failed_chain(chain_hash); } +// Regression test for https://github.com/sigp/lighthouse/pull/7118 +#[test] +fn test_child_lookup_not_created_for_failed_chain_parent_after_processing() { + // GIVEN: A parent chain longer than PARENT_DEPTH_TOLERANCE. 
+ let mut rig = TestRig::test_setup(); + let mut blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE + 1); + let peer_id = rig.new_connected_peer(); + + // The child of the trigger block to be used to extend the chain. + let trigger_block_child = blocks.pop().unwrap(); + // The trigger block that starts the lookup. + let trigger_block = blocks.pop().unwrap(); + let tip_root = trigger_block.canonical_root(); + + // Trigger the initial unknown parent block for the tip. + rig.trigger_unknown_parent_block(peer_id, trigger_block.clone()); + + // Simulate the lookup chain building up via `ParentUnknown` errors. + for block in blocks.into_iter().rev() { + let id = rig.expect_block_parent_request(block.canonical_root()); + rig.parent_lookup_block_response(id, peer_id, Some(block.clone())); + rig.parent_lookup_block_response(id, peer_id, None); + rig.expect_block_process(ResponseType::Block); + rig.parent_block_processed( + tip_root, + BlockProcessingResult::Err(BlockError::ParentUnknown { + parent_root: block.parent_root(), + }), + ); + } + + // At this point, the chain should have been deemed too deep and pruned. + // The tip root should have been inserted into failed chains. + rig.assert_failed_chain(tip_root); + rig.expect_no_penalty_for(peer_id); + + // WHEN: Trigger the extending block that points to the tip. + let trigger_block_child_root = trigger_block_child.canonical_root(); + rig.trigger_unknown_block_from_attestation(trigger_block_child_root, peer_id); + let id = rig.expect_block_lookup_request(trigger_block_child_root); + rig.single_lookup_block_response(id, peer_id, Some(trigger_block_child.clone())); + rig.single_lookup_block_response(id, peer_id, None); + rig.expect_block_process(ResponseType::Block); + rig.single_block_component_processed( + id.lookup_id, + BlockProcessingResult::Err(BlockError::ParentUnknown { + parent_root: tip_root, + }), + ); + + // THEN: The extending block should not create a lookup because the tip was inserted into failed chains. 
+ rig.expect_no_active_lookups(); + // AND: The peer should be penalized for extending a failed chain. + rig.expect_single_penalty(peer_id, "failed_chain"); + rig.expect_empty_network(); +} + #[test] fn test_parent_lookup_too_deep_grow_tip() { let mut rig = TestRig::test_setup(); diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index 3dca457108..1cc11e0152 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -3,7 +3,6 @@ use crate::sync::range_sync::RangeSyncType; use crate::sync::SyncMessage; use crate::NetworkMessage; use beacon_chain::builder::Witness; -use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_processor::WorkEvent; use lighthouse_network::NetworkGlobals; @@ -22,7 +21,7 @@ use types::{ChainSpec, ForkName, MinimalEthSpec as E}; mod lookups; mod range; -type T = Witness, E, MemoryStore, MemoryStore>; +type T = Witness, MemoryStore>; /// This test utility enables integration testing of Lighthouse sync components. 
/// diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 932f485dd0..fa1e057765 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -11,9 +11,9 @@ use beacon_chain::{block_verification_types::RpcBlock, EngineState, NotifyExecut use beacon_processor::WorkType; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, - OldBlocksByRangeRequestV2, + OldBlocksByRangeRequestV2, StatusMessageV2, }; -use lighthouse_network::rpc::{RequestType, StatusMessage}; +use lighthouse_network::rpc::RequestType; use lighthouse_network::service::api_types::{ AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, SyncRequestId, @@ -98,6 +98,7 @@ impl TestRig { finalized_root, head_slot: finalized_epoch.start_slot(E::slots_per_epoch()), head_root: Hash256::random(), + earliest_available_slot: None, }) } @@ -109,22 +110,25 @@ impl TestRig { finalized_root: Hash256::random(), head_slot: finalized_epoch.start_slot(E::slots_per_epoch()), head_root: Hash256::random(), + earliest_available_slot: None, } } fn local_info(&self) -> SyncInfo { - let StatusMessage { + let StatusMessageV2 { fork_digest: _, finalized_root, finalized_epoch, head_root, head_slot, - } = self.harness.chain.status_message(); + earliest_available_slot, + } = self.harness.chain.status_message().status_v2(); SyncInfo { head_slot, head_root, finalized_epoch, finalized_root, + earliest_available_slot: Some(earliest_available_slot), } } @@ -449,18 +453,10 @@ fn build_rpc_block( RpcBlock::new(None, block, Some(blobs.clone())).unwrap() } Some(DataSidecars::DataColumns(columns)) => { - RpcBlock::new_with_custody_columns( - None, - block, - columns.clone(), - // TODO(das): Assumes CGC = max value. 
Change if we want to do more complex tests - columns.len(), - spec, - ) - .unwrap() + RpcBlock::new_with_custody_columns(None, block, columns.clone(), spec).unwrap() } // Block has no data, expects zero columns - None => RpcBlock::new_without_blobs(None, block, 0), + None => RpcBlock::new_without_blobs(None, block), } } diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 67c24b9c7a..13ef94c18d 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -96,7 +96,7 @@ impl SplitAttestation { } } - pub fn as_ref(&self) -> CompactAttestationRef { + pub fn as_ref(&self) -> CompactAttestationRef<'_, E> { CompactAttestationRef { checkpoint: &self.checkpoint, data: &self.data, @@ -438,7 +438,7 @@ impl AttestationMap { } /// Iterate all attestations in the map. - pub fn iter(&self) -> impl Iterator> { + pub fn iter(&self) -> impl Iterator> { self.checkpoint_map .iter() .flat_map(|(checkpoint_key, attestation_map)| attestation_map.iter(checkpoint_key)) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 7481aa896a..642fc51f69 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -700,8 +700,8 @@ impl OperationPool { pub fn get_all_proposer_slashings(&self) -> Vec { self.proposer_slashings .read() - .iter() - .map(|(_, slashing)| slashing.as_inner().clone()) + .values() + .map(|slashing| slashing.as_inner().clone()) .collect() } @@ -711,8 +711,8 @@ impl OperationPool { pub fn get_all_voluntary_exits(&self) -> Vec { self.voluntary_exits .read() - .iter() - .map(|(_, exit)| exit.as_inner().clone()) + .values() + .map(|exit| exit.as_inner().clone()) .collect() } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 79509e5f6c..88c8dbbf3c 100644 --- 
a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -86,15 +86,15 @@ impl PersistedOperationPool { let proposer_slashings = operation_pool .proposer_slashings .read() - .iter() - .map(|(_, slashing)| slashing.clone()) + .values() + .cloned() .collect(); let voluntary_exits = operation_pool .voluntary_exits .read() - .iter() - .map(|(_, exit)| exit.clone()) + .values() + .cloned() .collect(); let bls_to_execution_changes = operation_pool diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 7d086dcc32..f3f9aa97a2 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -68,6 +68,16 @@ pub fn cli_app() -> Command { .hide(true) .display_order(0) ) + .arg( + // TODO(das): remove this before PeerDAS release + Arg::new("advertise-false-custody-group-count") + .long("advertise-false-custody-group-count") + .action(ArgAction::Set) + .help_heading(FLAG_HEADER) + .help("Advertises a false CGC for testing PeerDAS. Do NOT use in production.") + .hide(true) + .display_order(0) + ) .arg( Arg::new("enable-sampling") .long("enable-sampling") @@ -692,54 +702,33 @@ pub fn cli_app() -> Command { /* * Eth1 Integration */ - .arg( - Arg::new("eth1") - .long("eth1") - .help("DEPRECATED") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - .hide(true) - ) - .arg( - Arg::new("dummy-eth1") - .long("dummy-eth1") - .help("DEPRECATED") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .conflicts_with("eth1") - .display_order(0) - .hide(true) - ) .arg( Arg::new("eth1-purge-cache") .long("eth1-purge-cache") .value_name("PURGE-CACHE") - .help("Purges the eth1 block and deposit caches") + .help("DEPRECATED") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .display_order(0) + .hide(true) ) .arg( Arg::new("eth1-blocks-per-log-query") .long("eth1-blocks-per-log-query") .value_name("BLOCKS") - .help("Specifies the number of blocks that a deposit log query should span. 
\ - This will reduce the size of responses from the Eth1 endpoint.") - .default_value("1000") + .help("DEPRECATED") .action(ArgAction::Set) .display_order(0) + .hide(true) ) .arg( Arg::new("eth1-cache-follow-distance") .long("eth1-cache-follow-distance") .value_name("BLOCKS") - .help("Specifies the distance between the Eth1 chain head and the last block which \ - should be imported into the cache. Setting this value lower can help \ - compensate for irregular Proof-of-Work block times, but setting it too low \ - can make the node vulnerable to re-orgs.") + .help("DEPRECATED") .action(ArgAction::Set) .display_order(0) + .hide(true) ) .arg( Arg::new("slots-per-restore-point") @@ -808,14 +797,26 @@ pub fn cli_app() -> Command { Arg::new("hdiff-buffer-cache-size") .long("hdiff-buffer-cache-size") .value_name("SIZE") - .help("Number of hierarchical diff (hdiff) buffers to cache in memory. Each buffer \ - is around the size of a BeaconState so you should be cautious about setting \ - this value too high. This flag is irrelevant for most nodes, which run with \ - state pruning enabled.") + .help("Number of cold hierarchical diff (hdiff) buffers to cache in memory. Each \ + buffer is around the size of a BeaconState so you should be cautious about \ + setting this value too high. This flag is irrelevant for most nodes, which \ + run with state pruning enabled.") .default_value("16") .action(ArgAction::Set) .display_order(0) ) + .arg( + Arg::new("hot-hdiff-buffer-cache-size") + .long("hot-hdiff-buffer-cache-size") + .value_name("SIZE") + .help("Number of hot hierarchical diff (hdiff) buffers to cache in memory. Each \ + buffer is around the size of a BeaconState so you should be cautious about \ + setting this value too high. 
Setting this value higher can reduce the time \ + taken to store new states on disk at the cost of higher memory usage.") + .default_value("1") + .action(ArgAction::Set) + .display_order(0) + ) .arg( Arg::new("state-cache-size") .long("state-cache-size") @@ -1491,13 +1492,12 @@ pub fn cli_app() -> Command { .arg( Arg::new("disable-deposit-contract-sync") .long("disable-deposit-contract-sync") - .help("Explicitly disables syncing of deposit logs from the execution node. \ - This overrides any previous option that depends on it. \ - Useful if you intend to run a non-validating beacon node.") + .help("DEPRECATED") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .conflicts_with("staking") .display_order(0) + .hide(true) ) .arg( Arg::new("disable-optimistic-finalized-sync") @@ -1645,7 +1645,7 @@ pub fn cli_app() -> Command { .arg( Arg::new("delay-data-column-publishing") .long("delay-data-column-publishing") - .value_name("SECONDS") + .value_name("SECONDS") .action(ArgAction::Set) .help_heading(FLAG_HEADER) .help("TESTING ONLY: Artificially delay data column publishing by the specified number of seconds. 
\ diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index e887aa9abc..3c6339c03e 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -8,17 +8,15 @@ use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::TrustedSetup; use clap::{parser::ValueSource, ArgMatches, Id}; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; -use clap_utils::{parse_flag, parse_required}; +use clap_utils::{parse_flag, parse_optional, parse_required}; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; use execution_layer::DEFAULT_JWT_FILE; -use genesis::Eth1Endpoint; use http_api::TlsConfig; use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; -use std::cmp::max; use std::collections::HashSet; use std::fmt::Debug; use std::fs; @@ -266,31 +264,21 @@ pub fn get_config( } /* - * Eth1 + * Deprecated Eth1 flags (can be removed in the next minor release after v7.1.0) */ - - if cli_args.get_flag("dummy-eth1") { - warn!("The --dummy-eth1 flag is deprecated"); - } - - if cli_args.get_flag("eth1") { - warn!("The --eth1 flag is deprecated"); - } - - if let Some(val) = cli_args.get_one::("eth1-blocks-per-log-query") { - client_config.eth1.blocks_per_log_query = val - .parse() - .map_err(|_| "eth1-blocks-per-log-query is not a valid integer".to_string())?; + if cli_args + .get_one::("eth1-blocks-per-log-query") + .is_some() + { + warn!("The eth1-blocks-per-log-query flag is deprecated"); } if cli_args.get_flag("eth1-purge-cache") { - client_config.eth1.purge_cache = true; + warn!("The eth1-purge-cache flag is deprecated"); } - if let Some(follow_distance) = - clap_utils::parse_optional(cli_args, "eth1-cache-follow-distance")? 
- { - client_config.eth1.cache_follow_distance = Some(follow_distance); + if clap_utils::parse_optional::(cli_args, "eth1-cache-follow-distance")?.is_some() { + warn!("The eth1-purge-cache flag is deprecated"); } // `--execution-endpoint` is required now. @@ -358,13 +346,6 @@ pub fn get_config( clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); - client_config.eth1.endpoint = Eth1Endpoint::Auth { - endpoint: execution_endpoint, - jwt_path: secret_file, - jwt_id: el_config.jwt_id.clone(), - jwt_version: el_config.jwt_version.clone(), - }; - // Store the EL config in the client config. client_config.execution_layer = Some(el_config); @@ -418,7 +399,13 @@ pub fn get_config( if let Some(hdiff_buffer_cache_size) = clap_utils::parse_optional(cli_args, "hdiff-buffer-cache-size")? { - client_config.store.hdiff_buffer_cache_size = hdiff_buffer_cache_size; + client_config.store.cold_hdiff_buffer_cache_size = hdiff_buffer_cache_size; + } + + if let Some(hdiff_buffer_cache_size) = + clap_utils::parse_optional(cli_args, "hot-hdiff-buffer-cache-size")? 
+ { + client_config.store.hot_hdiff_buffer_cache_size = hdiff_buffer_cache_size; } client_config.store.compact_on_init = cli_args.get_flag("compact-db"); @@ -500,20 +487,9 @@ pub fn get_config( .as_ref() .ok_or("Context is missing eth2 network config")?; - client_config.eth1.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); - client_config.eth1.deposit_contract_deploy_block = - eth2_network_config.deposit_contract_deploy_block; - client_config.eth1.lowest_cached_block_number = - client_config.eth1.deposit_contract_deploy_block; - client_config.eth1.follow_distance = spec.eth1_follow_distance; - client_config.eth1.node_far_behind_seconds = - max(5, spec.eth1_follow_distance / 2) * spec.seconds_per_eth1_block; - client_config.eth1.chain_id = spec.deposit_chain_id.into(); - client_config.eth1.set_block_cache_truncation::(spec); - info!( - deploy_block = client_config.eth1.deposit_contract_deploy_block, - address = &client_config.eth1.deposit_contract_address, + deploy_block = eth2_network_config.deposit_contract_deploy_block, + address = ?spec.deposit_contract_address, "Deposit contract" ); @@ -809,9 +785,8 @@ pub fn get_config( } } - // Note: This overrides any previous flags that enable this option. if cli_args.get_flag("disable-deposit-contract-sync") { - client_config.sync_eth1_chain = false; + warn!("The disable-deposit-contract-sync flag is deprecated"); } client_config.chain.prepare_payload_lookahead = @@ -1197,6 +1172,12 @@ pub fn set_network_config( config.import_all_attestations = true; } + if let Some(advertise_false_custody_group_count) = + parse_optional(cli_args, "advertise-false-custody-group-count")? 
+ { + config.advertise_false_custody_group_count = Some(advertise_false_custody_group_count); + } + if parse_flag(cli_args, "shutdown-after-sync") { config.shutdown_after_sync = true; } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index a7f92434ce..96abae735b 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -2,9 +2,7 @@ mod cli; mod config; pub use beacon_chain; -use beacon_chain::{ - builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, -}; +use beacon_chain::{builder::Witness, slot_clock::SystemTimeSlotClock}; use clap::ArgMatches; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; @@ -19,15 +17,8 @@ use tracing::{info, warn}; use types::{ChainSpec, Epoch, EthSpec, ForkName}; /// A type-alias to the tighten the definition of a production-intended `Client`. -pub type ProductionClient = Client< - Witness< - SystemTimeSlotClock, - CachingEth1Backend, - E, - BeaconNodeBackend, - BeaconNodeBackend, - >, ->; +pub type ProductionClient = + Client, BeaconNodeBackend>>; /// The beacon node `Client` that will be used in production. /// @@ -132,22 +123,7 @@ impl ProductionBeaconNode { let builder = builder .beacon_chain_builder(client_genesis, client_config.clone()) .await?; - let builder = if client_config.sync_eth1_chain { - info!( - endpoint = ?client_config.eth1.endpoint, - method = "json rpc via http", - "Block production enabled" - ); - builder - .caching_eth1_backend(client_config.eth1.clone()) - .await? - } else { - info!( - reason = "no eth1 backend configured", - "Block production disabled" - ); - builder.no_eth1_backend()? 
- }; + info!("Block production enabled"); let builder = builder.system_time_slot_clock()?; diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index a84573eb40..c16573df5e 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,6 +1,6 @@ use crate::hdiff::HierarchyConfig; use crate::superstruct; -use crate::{AnchorInfo, DBColumn, Error, Split, StoreItem}; +use crate::{DBColumn, Error, StoreItem}; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -24,7 +24,8 @@ pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); pub const DEFAULT_STATE_CACHE_HEADROOM: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_COMPRESSION_LEVEL: i32 = 1; pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); -pub const DEFAULT_HDIFF_BUFFER_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); +pub const DEFAULT_COLD_HDIFF_BUFFER_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); +pub const DEFAULT_HOT_HDIFF_BUFFER_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); const EST_COMPRESSION_FACTOR: usize = 2; pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1; pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; @@ -42,8 +43,10 @@ pub struct StoreConfig { pub compression_level: i32, /// Maximum number of historic states to store in the in-memory historic state cache. pub historic_state_cache_size: NonZeroUsize, - /// Maximum number of `HDiffBuffer`s to store in memory. - pub hdiff_buffer_cache_size: NonZeroUsize, + /// Maximum number of cold `HDiffBuffer`s to store in memory. + pub cold_hdiff_buffer_cache_size: NonZeroUsize, + /// Maximum number of hot `HDiffBuffers` to store in memory. + pub hot_hdiff_buffer_cache_size: NonZeroUsize, /// Whether to compact the database on initialization. pub compact_on_init: bool, /// Whether to compact the database during database pruning. 
@@ -65,14 +68,12 @@ pub struct StoreConfig { /// Variant of `StoreConfig` that gets written to disk. Contains immutable configuration params. #[superstruct( - variants(V1, V22), + variants(V22), variant_attributes(derive(Debug, Clone, PartialEq, Eq, Encode, Decode)) )] #[derive(Clone, Debug, PartialEq, Eq)] pub struct OnDiskStoreConfig { - #[superstruct(only(V1))] - pub slots_per_restore_point: u64, - /// Prefix byte to future-proof versions of the `OnDiskStoreConfig` post V1 + /// Prefix byte to future-proof versions of the `OnDiskStoreConfig`. #[superstruct(only(V22))] version_byte: u8, #[superstruct(only(V22))] @@ -90,10 +91,6 @@ impl OnDiskStoreConfigV22 { #[derive(Debug, Clone)] pub enum StoreConfigError { - MismatchedSlotsPerRestorePoint { - config: u64, - on_disk: u64, - }, InvalidCompressionLevel { level: i32, }, @@ -112,7 +109,8 @@ impl Default for StoreConfig { state_cache_size: DEFAULT_STATE_CACHE_SIZE, state_cache_headroom: DEFAULT_STATE_CACHE_HEADROOM, historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, - hdiff_buffer_cache_size: DEFAULT_HDIFF_BUFFER_CACHE_SIZE, + cold_hdiff_buffer_cache_size: DEFAULT_COLD_HDIFF_BUFFER_CACHE_SIZE, + hot_hdiff_buffer_cache_size: DEFAULT_HOT_HDIFF_BUFFER_CACHE_SIZE, compression_level: DEFAULT_COMPRESSION_LEVEL, compact_on_init: false, compact_on_prune: true, @@ -134,21 +132,13 @@ impl StoreConfig { pub fn check_compatibility( &self, on_disk_config: &OnDiskStoreConfig, - split: &Split, - anchor: &AnchorInfo, ) -> Result<(), StoreConfigError> { - // Allow changing the hierarchy exponents if no historic states are stored. 
- let no_historic_states_stored = anchor.no_historic_states_stored(split.slot); - let hierarchy_config_changed = - if let Ok(on_disk_hierarchy_config) = on_disk_config.hierarchy_config() { - *on_disk_hierarchy_config != self.hierarchy_config - } else { - false - }; - - if hierarchy_config_changed && !no_historic_states_stored { + // We previously allowed the hierarchy exponents to change on non-archive nodes, but since + // schema v24 and the use of hdiffs in the hot DB, changing will require a resync. + let current_config = self.as_disk_config(); + if current_config != *on_disk_config { Err(StoreConfigError::IncompatibleStoreConfig { - config: self.as_disk_config(), + config: current_config, on_disk: on_disk_config.clone(), }) } else { @@ -222,32 +212,21 @@ impl StoreItem for OnDiskStoreConfig { fn as_store_bytes(&self) -> Vec { match self { - OnDiskStoreConfig::V1(value) => value.as_ssz_bytes(), OnDiskStoreConfig::V22(value) => value.as_ssz_bytes(), } } fn from_store_bytes(bytes: &[u8]) -> Result { - // NOTE: V22 config can never be deserialized as a V1 because the minimum length of its - // serialization is: 1 prefix byte + 1 offset (OnDiskStoreConfigV1 container) + - // 1 offset (HierarchyConfig container) = 9. 
- if let Ok(value) = OnDiskStoreConfigV1::from_ssz_bytes(bytes) { - return Ok(Self::V1(value)); + match bytes.first() { + Some(22) => Ok(Self::V22(OnDiskStoreConfigV22::from_ssz_bytes(bytes)?)), + version_byte => Err(StoreConfigError::InvalidVersionByte(version_byte.copied()).into()), } - - Ok(Self::V22(OnDiskStoreConfigV22::from_ssz_bytes(bytes)?)) } } #[cfg(test)] mod test { use super::*; - use crate::{ - metadata::{ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_UNINITIALIZED, STATE_UPPER_LIMIT_NO_RETAIN}, - AnchorInfo, Split, - }; - use ssz::DecodeError; - use types::{Hash256, Slot}; #[test] fn check_compatibility_ok() { @@ -257,24 +236,7 @@ mod test { let on_disk_config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new( store_config.hierarchy_config.clone(), )); - let split = Split::default(); - assert!(store_config - .check_compatibility(&on_disk_config, &split, &ANCHOR_UNINITIALIZED) - .is_ok()); - } - - #[test] - fn check_compatibility_after_migration() { - let store_config = StoreConfig { - ..Default::default() - }; - let on_disk_config = OnDiskStoreConfig::V1(OnDiskStoreConfigV1 { - slots_per_restore_point: 8192, - }); - let split = Split::default(); - assert!(store_config - .check_compatibility(&on_disk_config, &split, &ANCHOR_UNINITIALIZED) - .is_ok()); + assert!(store_config.check_compatibility(&on_disk_config).is_ok()); } #[test] @@ -283,70 +245,11 @@ mod test { let on_disk_config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(HierarchyConfig { exponents: vec![5, 8, 11, 13, 16, 18, 21], })); - let split = Split { - slot: Slot::new(32), - ..Default::default() - }; - assert!(store_config - .check_compatibility(&on_disk_config, &split, &ANCHOR_FOR_ARCHIVE_NODE) - .is_err()); + assert!(store_config.check_compatibility(&on_disk_config).is_err()); } #[test] - fn check_compatibility_hierarchy_config_update() { - let store_config = StoreConfig { - ..Default::default() - }; - let on_disk_config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(HierarchyConfig { - 
exponents: vec![5, 8, 11, 13, 16, 18, 21], - })); - let split = Split::default(); - let anchor = AnchorInfo { - anchor_slot: Slot::new(0), - oldest_block_slot: Slot::new(0), - oldest_block_parent: Hash256::ZERO, - state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, - state_lower_limit: Slot::new(0), - }; - assert!(store_config - .check_compatibility(&on_disk_config, &split, &anchor) - .is_ok()); - } - - #[test] - fn serde_on_disk_config_v0_from_v1_default() { - let config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(<_>::default())); - let config_bytes = config.as_store_bytes(); - // On a downgrade, the previous version of lighthouse will attempt to deserialize the - // prefixed V22 as just the V1 version. - assert_eq!( - OnDiskStoreConfigV1::from_ssz_bytes(&config_bytes).unwrap_err(), - DecodeError::InvalidByteLength { - len: 16, - expected: 8 - }, - ); - } - - #[test] - fn serde_on_disk_config_v0_from_v1_empty() { - let config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(HierarchyConfig { - exponents: vec![], - })); - let config_bytes = config.as_store_bytes(); - // On a downgrade, the previous version of lighthouse will attempt to deserialize the - // prefixed V22 as just the V1 version. 
- assert_eq!( - OnDiskStoreConfigV1::from_ssz_bytes(&config_bytes).unwrap_err(), - DecodeError::InvalidByteLength { - len: 9, - expected: 8 - }, - ); - } - - #[test] - fn serde_on_disk_config_v1_roundtrip() { + fn on_disk_config_v22_roundtrip() { let config = OnDiskStoreConfig::V22(OnDiskStoreConfigV22::new(<_>::default())); let bytes = config.as_store_bytes(); assert_eq!(bytes[0], 22); diff --git a/beacon_node/store/src/database/interface.rs b/beacon_node/store/src/database/interface.rs index b213433241..e405c6227d 100644 --- a/beacon_node/store/src/database/interface.rs +++ b/beacon_node/store/src/database/interface.rs @@ -105,15 +105,6 @@ impl KeyValueStore for BeaconNodeBackend { } } - fn begin_rw_transaction(&self) -> parking_lot::MutexGuard<()> { - match self { - #[cfg(feature = "leveldb")] - BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::begin_rw_transaction(txn), - #[cfg(feature = "redb")] - BeaconNodeBackend::Redb(txn) => redb_impl::Redb::begin_rw_transaction(txn), - } - } - fn compact(&self) -> Result<(), Error> { match self { #[cfg(feature = "leveldb")] @@ -123,7 +114,11 @@ impl KeyValueStore for BeaconNodeBackend { } } - fn iter_column_keys_from(&self, _column: DBColumn, from: &[u8]) -> ColumnKeyIter { + fn iter_column_keys_from( + &self, + _column: DBColumn, + from: &[u8], + ) -> ColumnKeyIter<'_, K> { match self { #[cfg(feature = "leveldb")] BeaconNodeBackend::LevelDb(txn) => { @@ -136,7 +131,7 @@ impl KeyValueStore for BeaconNodeBackend { } } - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter<'_, K> { match self { #[cfg(feature = "leveldb")] BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::iter_column_keys(txn, column), @@ -145,7 +140,7 @@ impl KeyValueStore for BeaconNodeBackend { } } - fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter<'_, K> { match self 
{ #[cfg(feature = "leveldb")] BeaconNodeBackend::LevelDb(txn) => { diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 81d6d1d4bd..54d7175089 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -13,7 +13,6 @@ use leveldb::{ iterator::{Iterable, LevelDBIterator}, options::{Options, ReadOptions}, }; -use parking_lot::{Mutex, MutexGuard}; use std::collections::HashSet; use std::marker::PhantomData; use std::path::Path; @@ -23,8 +22,6 @@ use super::interface::WriteOptions; pub struct LevelDB { db: Database, - /// A mutex to synchronise sensitive read-write transactions. - transaction_mutex: Mutex<()>, _phantom: PhantomData, } @@ -43,16 +40,14 @@ impl LevelDB { options.create_if_missing = true; let db = Database::open(path, options)?; - let transaction_mutex = Mutex::new(()); Ok(Self { db, - transaction_mutex, _phantom: PhantomData, }) } - pub fn read_options(&self) -> ReadOptions { + pub fn read_options(&self) -> ReadOptions<'_, BytesKey> { ReadOptions::new() } @@ -177,10 +172,6 @@ impl LevelDB { Ok(()) } - pub fn begin_rw_transaction(&self) -> MutexGuard<()> { - self.transaction_mutex.lock() - } - /// Compact all values in the states and states flag columns. 
pub fn compact(&self) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::DISK_DB_COMPACT_TIMES); @@ -216,7 +207,7 @@ impl LevelDB { Ok(()) } - pub fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + pub fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter<'_, K> { let start_key = BytesKey::from_vec(get_key_for_col(column, from)); let iter = self.db.iter(self.read_options()); iter.seek(&start_key); @@ -240,7 +231,11 @@ impl LevelDB { ) } - pub fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter { + pub fn iter_column_keys_from( + &self, + column: DBColumn, + from: &[u8], + ) -> ColumnKeyIter<'_, K> { let start_key = BytesKey::from_vec(get_key_for_col(column, from)); let iter = self.db.keys_iter(self.read_options()); @@ -262,11 +257,11 @@ impl LevelDB { } /// Iterate through all keys and values in a particular column. - pub fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + pub fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter<'_, K> { self.iter_column_keys_from(column, &vec![0; column.key_size()]) } - pub fn iter_column(&self, column: DBColumn) -> ColumnIter { + pub fn iter_column(&self, column: DBColumn) -> ColumnIter<'_, K> { self.iter_column_from(column, &vec![0; column.key_size()]) } diff --git a/beacon_node/store/src/database/redb_impl.rs b/beacon_node/store/src/database/redb_impl.rs index cbe575d184..10d387adc8 100644 --- a/beacon_node/store/src/database/redb_impl.rs +++ b/beacon_node/store/src/database/redb_impl.rs @@ -1,6 +1,6 @@ use crate::{metrics, ColumnIter, ColumnKeyIter, Key}; use crate::{DBColumn, Error, KeyValueStoreOp}; -use parking_lot::{Mutex, MutexGuard, RwLock}; +use parking_lot::RwLock; use redb::TableDefinition; use std::collections::HashSet; use std::{borrow::BorrowMut, marker::PhantomData, path::Path}; @@ -13,7 +13,6 @@ pub const DB_FILE_NAME: &str = "database.redb"; pub struct Redb { db: RwLock, - transaction_mutex: Mutex<()>, 
_phantom: PhantomData, } @@ -31,7 +30,6 @@ impl Redb { pub fn open(path: &Path) -> Result { let db_file = path.join(DB_FILE_NAME); let db = redb::Database::create(db_file)?; - let transaction_mutex = Mutex::new(()); for column in DBColumn::iter() { Redb::::create_table(&db, column.into())?; @@ -39,7 +37,6 @@ impl Redb { Ok(Self { db: db.into(), - transaction_mutex, _phantom: PhantomData, }) } @@ -61,10 +58,6 @@ impl Redb { opts } - pub fn begin_rw_transaction(&self) -> MutexGuard<()> { - self.transaction_mutex.lock() - } - pub fn put_bytes_with_options( &self, col: DBColumn, diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index cff08bc655..eb1fb64718 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -1,6 +1,6 @@ use crate::chunked_vector::ChunkError; use crate::config::StoreConfigError; -use crate::hot_cold_store::HotColdDBError; +use crate::hot_cold_store::{HotColdDBError, StateSummaryIteratorError}; use crate::{hdiff, DBColumn}; #[cfg(feature = "leveldb")] use leveldb::error::Error as LevelDBError; @@ -26,6 +26,9 @@ pub enum Error { SplitPointModified(Slot, Slot), ConfigError(StoreConfigError), MigrationError(String), + /// The store's `anchor_info` is still the default uninitialized value when attempting a state + /// write + AnchorUninitialized, /// The store's `anchor_info` was mutated concurrently, the latest modification wasn't applied. AnchorInfoConcurrentMutation, /// The store's `blob_info` was mutated concurrently, the latest modification wasn't applied. 
@@ -47,11 +50,16 @@ pub enum Error { expected: Hash256, computed: Hash256, }, + MissingState(Hash256), + MissingHotStateSummary(Hash256), + MissingHotStateSnapshot(Hash256, Slot), MissingGenesisState, MissingSnapshot(Slot), + LoadingHotHdiffBufferError(String, Hash256, Box), + LoadingHotStateError(String, Hash256, Box), BlockReplayError(BlockReplayError), AddPayloadLogicError, - InvalidKey, + InvalidKey(String), InvalidBytes, InconsistentFork(InconsistentFork), #[cfg(feature = "leveldb")] @@ -75,6 +83,26 @@ pub enum Error { MissingBlock(Hash256), GenesisStateUnknown, ArithError(safe_arith::ArithError), + MismatchedDiffBaseState { + expected_slot: Slot, + stored_slot: Slot, + }, + SnapshotDiffBaseState { + slot: Slot, + }, + LoadAnchorInfo(Box), + LoadSplit(Box), + LoadBlobInfo(Box), + LoadDataColumnInfo(Box), + LoadConfig(Box), + LoadHotStateSummary(Hash256, Box), + LoadHotStateSummaryForSplit(Box), + StateSummaryIteratorError { + error: StateSummaryIteratorError, + from_state_root: Hash256, + from_state_slot: Slot, + target_slot: Slot, + }, } pub trait HandleUnavailable { diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index a659c65452..5731ebcbe0 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -27,6 +27,7 @@ pub enum Error { Compression(std::io::Error), InvalidSszState(ssz::DecodeError), InvalidBalancesLength, + LessThanStart(Slot, Slot), } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Encode, Decode)] @@ -67,6 +68,10 @@ impl FromStr for HierarchyConfig { return Err("hierarchy-exponents must be in ascending order".to_string()); } + if exponents.is_empty() { + return Err("empty exponents".to_string()); + } + Ok(HierarchyConfig { exponents }) } } @@ -478,7 +483,9 @@ impl ValidatorsDiff { Hash256::ZERO }, // effective_balance can increase and decrease - effective_balance: y.effective_balance - x.effective_balance, + effective_balance: y + .effective_balance + 
.wrapping_sub(x.effective_balance), // slashed can only change from false into true. In an index re-use it can // switch back to false, but in that case the pubkey will also change. slashed: y.slashed, @@ -642,10 +649,26 @@ impl HierarchyConfig { Err(Error::InvalidHierarchy) } } + + pub fn exponent_for_slot(slot: Slot) -> u32 { + slot.as_u64().trailing_zeros() + } } impl HierarchyModuli { - pub fn storage_strategy(&self, slot: Slot) -> Result { + /// * `slot` - Slot of the storage strategy + /// * `start_slot` - Slot before which states are not available. Initial snapshot point, which + /// may not be aligned to the hierarchy moduli values. Given an example of + /// exponents [5,13,21], to reconstruct state at slot 3,000,003: if start = 3,000,002 + /// layer 2 diff will point to the start snapshot instead of the layer 1 diff at + /// 2998272. + pub fn storage_strategy(&self, slot: Slot, start_slot: Slot) -> Result { + match slot.cmp(&start_slot) { + Ordering::Less => return Err(Error::LessThanStart(slot, start_slot)), + Ordering::Equal => return Ok(StorageStrategy::Snapshot), + Ordering::Greater => {} // continue + } + // last = full snapshot interval let last = self.moduli.last().copied().ok_or(Error::InvalidHierarchy)?; // first = most frequent diff layer, need to replay blocks from this layer @@ -667,14 +690,22 @@ impl HierarchyModuli { .find_map(|(&n_big, &n_small)| { if slot % n_small == 0 { // Diff from the previous layer. 
- Some(StorageStrategy::DiffFrom(slot / n_big * n_big)) + let from = slot / n_big * n_big; + // Or from start point + let from = std::cmp::max(from, start_slot); + Some(StorageStrategy::DiffFrom(from)) } else { // Keep trying with next layer None } }) // Exhausted layers, need to replay from most frequent layer - .unwrap_or(StorageStrategy::ReplayFrom(slot / first * first))) + .unwrap_or_else(|| { + let from = slot / first * first; + // Or from start point + let from = std::cmp::max(from, start_slot); + StorageStrategy::ReplayFrom(from) + })) } /// Return the smallest slot greater than or equal to `slot` at which a full snapshot should @@ -703,6 +734,26 @@ impl HierarchyModuli { |second_layer_moduli| Ok(slot % *second_layer_moduli == 0), ) } + + /// For each layer, returns the closest diff less than or equal to `slot`. + pub fn closest_layer_points(&self, slot: Slot, start_slot: Slot) -> Vec { + let mut layers = self + .moduli + .iter() + .map(|&n| { + let from = slot / n * n; + // Or from start point + std::cmp::max(from, start_slot) + }) + .collect::>(); + + // Remove duplication caused by the capping at `start_slot` (multiple + // layers may have the same slot equal to `start_slot`), or shared multiples (a slot that is + // a multiple of 2**n will also be a multiple of 2**m for all m < n). + layers.dedup(); + + layers + } } impl StorageStrategy { @@ -732,6 +783,27 @@ impl StorageStrategy { } .map(Slot::from) } + + /// Returns the slot that storage_strategy points to. 
+ pub fn diff_base_slot(&self) -> Option { + match self { + Self::ReplayFrom(from) => Some(*from), + Self::DiffFrom(from) => Some(*from), + Self::Snapshot => None, + } + } + + pub fn is_replay_from(&self) -> bool { + matches!(self, Self::ReplayFrom(_)) + } + + pub fn is_diff_from(&self) -> bool { + matches!(self, Self::DiffFrom(_)) + } + + pub fn is_snapshot(&self) -> bool { + matches!(self, Self::Snapshot) + } } #[cfg(test)] @@ -743,34 +815,37 @@ mod tests { fn default_storage_strategy() { let config = HierarchyConfig::default(); config.validate().unwrap(); + let sslot = Slot::new(0); let moduli = config.to_moduli().unwrap(); // Full snapshots at multiples of 2^21. let snapshot_freq = Slot::new(1 << 21); assert_eq!( - moduli.storage_strategy(Slot::new(0)).unwrap(), + moduli.storage_strategy(Slot::new(0), sslot).unwrap(), StorageStrategy::Snapshot ); assert_eq!( - moduli.storage_strategy(snapshot_freq).unwrap(), + moduli.storage_strategy(snapshot_freq, sslot).unwrap(), StorageStrategy::Snapshot ); assert_eq!( - moduli.storage_strategy(snapshot_freq * 3).unwrap(), + moduli.storage_strategy(snapshot_freq * 3, sslot).unwrap(), StorageStrategy::Snapshot ); // Diffs should be from the previous layer (the snapshot in this case), and not the previous diff in the same layer. let first_layer = Slot::new(1 << 18); assert_eq!( - moduli.storage_strategy(first_layer * 2).unwrap(), + moduli.storage_strategy(first_layer * 2, sslot).unwrap(), StorageStrategy::DiffFrom(Slot::new(0)) ); let replay_strategy_slot = first_layer + 1; assert_eq!( - moduli.storage_strategy(replay_strategy_slot).unwrap(), + moduli + .storage_strategy(replay_strategy_slot, sslot) + .unwrap(), StorageStrategy::ReplayFrom(first_layer) ); } @@ -940,4 +1015,93 @@ mod tests { ] ); } + + // Test that the diffs and snapshots required for storage of split states are retained in the + // hot DB as the split slot advances, if we begin from an initial configuration where this + // invariant holds. 
+ fn test_slots_retained_invariant(hierarchy: HierarchyModuli, start_slot: u64, epoch_jump: u64) { + let start_slot = Slot::new(start_slot); + let mut finalized_slot = start_slot; + + // Initially we have just one snapshot stored at the `start_slot`. This is what checkpoint + // sync sets up (or the V24 migration). + let mut retained_slots = vec![finalized_slot]; + + // Iterate until we've reached two snapshots in the future. + let stop_at = hierarchy + .next_snapshot_slot(hierarchy.next_snapshot_slot(start_slot).unwrap() + 1) + .unwrap(); + + while finalized_slot <= stop_at { + // Jump multiple epocsh at a time because inter-epoch states are not interesting and + // would take too long to iterate over. + let new_finalized_slot = finalized_slot + 32 * epoch_jump; + + let new_retained_slots = hierarchy.closest_layer_points(new_finalized_slot, start_slot); + + for slot in &new_retained_slots { + // All new retained slots must either be already stored prior to the old finalized + // slot, OR newer than the finalized slot (i.e. stored in the hot DB as part of + // regular state storage). + assert!(retained_slots.contains(slot) || *slot >= finalized_slot); + } + + retained_slots = new_retained_slots; + finalized_slot = new_finalized_slot; + } + } + + #[test] + fn slots_retained_invariant() { + let cases = [ + // Default hierarchy with a start_slot between the 2^13 and 2^16 layers. + ( + HierarchyConfig::default().to_moduli().unwrap(), + 2 * (1 << 14) - 5 * 32, + 1, + ), + // Default hierarchy with a start_slot between the 2^13 and 2^16 layers, with 8 epochs + // finalizing at a time (should not make any difference). + ( + HierarchyConfig::default().to_moduli().unwrap(), + 2 * (1 << 14) - 5 * 32, + 8, + ), + // Very dense hierarchy config. + ( + HierarchyConfig::from_str("5,7") + .unwrap() + .to_moduli() + .unwrap(), + 32, + 1, + ), + // Very dense hierarchy config that skips a whole snapshot on its first finalization. 
+ ( + HierarchyConfig::from_str("5,7") + .unwrap() + .to_moduli() + .unwrap(), + 32, + 1 << 7, + ), + ]; + + for (hierarchy, start_slot, epoch_jump) in cases { + test_slots_retained_invariant(hierarchy, start_slot, epoch_jump); + } + } + + #[test] + fn closest_layer_points_unique() { + let hierarchy = HierarchyConfig::default().to_moduli().unwrap(); + + let start_slot = Slot::new(0); + let end_slot = hierarchy.next_snapshot_slot(Slot::new(1)).unwrap(); + + for slot in (0..end_slot.as_u64()).map(Slot::new) { + let closest_layer_points = hierarchy.closest_layer_points(slot, start_slot); + assert!(closest_layer_points.is_sorted_by(|a, b| a > b)); + } + } } diff --git a/beacon_node/store/src/historic_state_cache.rs b/beacon_node/store/src/historic_state_cache.rs index c0e8f8346c..e5abb04c07 100644 --- a/beacon_node/store/src/historic_state_cache.rs +++ b/beacon_node/store/src/historic_state_cache.rs @@ -34,11 +34,17 @@ impl HistoricStateCache { pub fn get_hdiff_buffer(&mut self, slot: Slot) -> Option { if let Some(buffer_ref) = self.hdiff_buffers.get(&slot) { - let _timer = metrics::start_timer(&metrics::BEACON_HDIFF_BUFFER_CLONE_TIMES); + let _timer = metrics::start_timer_vec( + &metrics::BEACON_HDIFF_BUFFER_CLONE_TIME, + metrics::COLD_METRIC, + ); Some(buffer_ref.clone()) } else if let Some(state) = self.states.get(&slot) { let buffer = HDiffBuffer::from_state(state.clone()); - let _timer = metrics::start_timer(&metrics::BEACON_HDIFF_BUFFER_CLONE_TIMES); + let _timer = metrics::start_timer_vec( + &metrics::BEACON_HDIFF_BUFFER_CLONE_TIME, + metrics::COLD_METRIC, + ); let cloned = buffer.clone(); drop(_timer); self.hdiff_buffers.put(slot, cloned); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index d4b68357b2..f5e44f7ac9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1,21 +1,22 @@ use crate::config::{OnDiskStoreConfig, StoreConfig}; use 
crate::database::interface::BeaconNodeBackend; use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator}; -use crate::hdiff::{HDiff, HDiffBuffer, HierarchyModuli, StorageStrategy}; +use crate::hdiff::{HDiff, HDiffBuffer, HierarchyConfig, HierarchyModuli, StorageStrategy}; use crate::historic_state_cache::HistoricStateCache; -use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::iter::{BlockRootsIterator, ParentRootBlockIterator, RootsIterator}; use crate::memory_store::MemoryStore; use crate::metadata::{ - AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnInfo, PruningCheckpoint, SchemaVersion, - ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_INFO_KEY, ANCHOR_UNINITIALIZED, BLOB_INFO_KEY, - COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, DATA_COLUMN_INFO_KEY, - PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, + AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnInfo, SchemaVersion, ANCHOR_INFO_KEY, + ANCHOR_UNINITIALIZED, BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, + CURRENT_SCHEMA_VERSION, DATA_COLUMN_INFO_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, + STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, metrics, parse_data_column_key, BlobSidecarListFromRoot, DBColumn, - DatabaseBlock, Error, ItemStore, KeyValueStoreOp, StoreItem, StoreOp, + get_data_column_key, + metrics::{self, COLD_METRIC, HOT_METRIC}, + parse_data_column_key, BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, ItemStore, + KeyValueStoreOp, StoreItem, StoreOp, }; use itertools::{process_results, Itertools}; use lru::LruCache; @@ -28,7 +29,7 @@ use state_processing::{ block_replayer::PreSlotHook, AllCaches, BlockProcessingError, BlockReplayer, SlotProcessingError, }; -use std::cmp::min; +use std::cmp::{min, Ordering}; use std::collections::{HashMap, HashSet}; use std::io::{Read, Write}; use std::marker::PhantomData; @@ 
-59,7 +60,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// The starting slots for the range of data columns stored in the database. data_column_info: RwLock, pub(crate) config: StoreConfig, - pub(crate) hierarchy: HierarchyModuli, + pub hierarchy: HierarchyModuli, /// Cold database containing compact historical data. pub cold_db: Cold, /// Database containing blobs. If None, store falls back to use `cold_db`. @@ -159,9 +160,13 @@ pub enum HotColdDBError { MissingColdStateSummary(Hash256), MissingHotStateSummary(Hash256), MissingEpochBoundaryState(Hash256, Hash256), + MissingHotState { + state_root: Hash256, + requested_by_state_summary: (Hash256, Slot), + }, MissingPrevState(Hash256), MissingSplitState(Hash256, Slot), - MissingStateDiff(Hash256), + MissingHotHDiff(Hash256), MissingHDiff(Slot), MissingExecutionPayload(Hash256), MissingFullBlockExecutionPayloadPruned(Hash256, Slot), @@ -170,7 +175,7 @@ pub enum HotColdDBError { MissingFrozenBlock(Slot), MissingPathToBlobsDatabase, BlobsPreviouslyInDefaultStore, - HotStateSummaryError(BeaconStateError), + HdiffGetPriorStateRootError(Slot, Slot), RestorePointDecodeError(ssz::DecodeError), BlockReplayBeaconError(BeaconStateError), BlockReplaySlotError(SlotProcessingError), @@ -203,6 +208,8 @@ impl HotColdDB, MemoryStore> { let hierarchy = config.hierarchy_config.to_moduli()?; + // NOTE: Anchor slot is initialized to 0, which is only valid for new DBs. We shouldn't + // be reusing memory stores, but if we want to do that we should redo this. 
let db = HotColdDB { split: RwLock::new(Split::default()), anchor_info: RwLock::new(ANCHOR_UNINITIALIZED), @@ -215,9 +222,10 @@ impl HotColdDB, MemoryStore> { state_cache: Mutex::new(StateCache::new( config.state_cache_size, config.state_cache_headroom, + config.hot_hdiff_buffer_cache_size, )), historic_state_cache: Mutex::new(HistoricStateCache::new( - config.hdiff_buffer_cache_size, + config.cold_hdiff_buffer_cache_size, config.historic_state_cache_size, )), config, @@ -243,12 +251,16 @@ impl HotColdDB, BeaconNodeBackend> { config: StoreConfig, spec: Arc, ) -> Result, Error> { + debug!("Opening HotColdDB"); config.verify::()?; let hierarchy = config.hierarchy_config.to_moduli()?; + debug!(?hot_path, "Opening LevelDB"); let hot_db = BeaconNodeBackend::open(&config, hot_path)?; + let anchor_info = RwLock::new(Self::load_anchor_info(&hot_db)?); + debug!(?anchor_info, "Loaded anchor info"); let db = HotColdDB { split: RwLock::new(Split::default()), @@ -262,9 +274,10 @@ impl HotColdDB, BeaconNodeBackend> { state_cache: Mutex::new(StateCache::new( config.state_cache_size, config.state_cache_headroom, + config.hot_hdiff_buffer_cache_size, )), historic_state_cache: Mutex::new(HistoricStateCache::new( - config.hdiff_buffer_cache_size, + config.cold_hdiff_buffer_cache_size, config.historic_state_cache_size, )), config, @@ -279,12 +292,27 @@ impl HotColdDB, BeaconNodeBackend> { // Load the previous split slot from the database (if any). This ensures we can // stop and restart correctly. This needs to occur *before* running any migrations // because some migrations load states and depend on the split. + // + // We use a method that is ambivalent to the state summaries being V22 or V24, because + // we need to support several scenarios: + // + // - Migrating from V22 to V24: initially summaries are V22 , and we need + // to be able to load a block root from them. 
Loading the split partially at first + // (without reading a V24 summary) and then completing the full load after the migration + // runs is possible in this case, but not in the next case. + // - Migrating from V24 to V22: initially summaries are V24, but after the migration runs + // they will be V22. If we used the "load full split after migration" approach with strict + // V24 summaries, it would break when trying to read V22 summaries after the migration. + // + // Therefore we take the most flexible approach of reading _either_ a V22 or V24 summary and + // using this to load the split correctly the first time. if let Some(split) = db.load_split()? { *db.split.write() = split; info!( %split.slot, - split_state = ?split.state_root, + ?split.state_root, + ?split.block_root, "Hot-Cold DB initialized" ); } @@ -353,6 +381,16 @@ impl HotColdDB, BeaconNodeBackend> { "Blob DB initialized" ); + // Ensure that any on-disk config is compatible with the supplied config. + // + // We do this prior to the migration now, because we don't want the migration using the + // in-memory config if it is inconsistent with the on-disk config. In future we may need + // to put this in/after the migration if the migration changes the config format. + if let Some(disk_config) = db.load_config()? { + db.config.check_compatibility(&disk_config)?; + } + db.store_config()?; + // Ensure that the schema version of the on-disk database matches the software. // If the version is mismatched, an automatic migration will be attempted. 
let db = Arc::new(db); @@ -362,31 +400,16 @@ impl HotColdDB, BeaconNodeBackend> { to_version = CURRENT_SCHEMA_VERSION.as_u64(), "Attempting schema migration" ); - migrate_schema(db.clone(), schema_version, CURRENT_SCHEMA_VERSION)?; + migrate_schema(db.clone(), schema_version, CURRENT_SCHEMA_VERSION).map_err(|e| { + Error::MigrationError(format!( + "Migrating from {:?} to {:?}: {:?}", + schema_version, CURRENT_SCHEMA_VERSION, e + )) + })?; } else { db.store_schema_version(CURRENT_SCHEMA_VERSION)?; } - // Ensure that any on-disk config is compatible with the supplied config. - if let Some(disk_config) = db.load_config()? { - let split = db.get_split_info(); - let anchor = db.get_anchor_info(); - db.config - .check_compatibility(&disk_config, &split, &anchor)?; - - // Inform user if hierarchy config is changing. - if let Ok(hierarchy_config) = disk_config.hierarchy_config() { - if &db.config.hierarchy_config != hierarchy_config { - info!( - previous_config = %hierarchy_config, - new_config = %db.config.hierarchy_config, - "Updating historic state config" - ); - } - } - } - db.store_config()?; - // TODO(tree-states): Here we can choose to prune advanced states to reclaim disk space. As // it's a foreground task there's no risk of race condition that can corrupt the DB. // Advanced states for invalid blocks that were never written to the DB, or descendants of @@ -400,20 +423,51 @@ impl HotColdDB, BeaconNodeBackend> { info!("Foreground compaction complete"); } + debug!(anchor = ?db.get_anchor_info(), "Store anchor info"); + Ok(db) } } impl, Cold: ItemStore> HotColdDB { + fn cold_storage_strategy(&self, slot: Slot) -> Result { + // The start slot for the freezer HDiff is always 0 + Ok(self.hierarchy.storage_strategy(slot, Slot::new(0))?) + } + + pub fn hot_storage_strategy(&self, slot: Slot) -> Result { + Ok(self + .hierarchy + .storage_strategy(slot, self.hot_hdiff_start_slot()?)?) 
+ } + + pub fn hot_hdiff_start_slot(&self) -> Result { + let anchor_slot = self.anchor_info.read_recursive().anchor_slot; + if anchor_slot == u64::MAX { + // If hot_hdiff_start_slot returns such a high value all writes will fail. This should + // never happen, but it's best to stop this useless value from propagating downstream + Err(Error::AnchorUninitialized) + } else { + Ok(anchor_slot) + } + } + pub fn update_finalized_state( &self, state_root: Hash256, block_root: Hash256, state: BeaconState, ) -> Result<(), Error> { - self.state_cache - .lock() - .update_finalized_state(state_root, block_root, state) + let start_slot = self.get_anchor_info().anchor_slot; + let pre_finalized_slots_to_retain = self + .hierarchy + .closest_layer_points(state.slot(), start_slot); + self.state_cache.lock().update_finalized_state( + state_root, + block_root, + state, + &pre_finalized_slots_to_retain, + ) } pub fn state_cache_len(&self) -> usize { @@ -431,20 +485,34 @@ impl, Cold: ItemStore> HotColdDB &metrics::STORE_BEACON_BLOB_CACHE_SIZE, self.block_cache.lock().blob_cache.len() as i64, ); + let state_cache = self.state_cache.lock(); metrics::set_gauge( &metrics::STORE_BEACON_STATE_CACHE_SIZE, - self.state_cache.lock().len() as i64, + state_cache.len() as i64, ); + metrics::set_gauge_vec( + &metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_SIZE, + HOT_METRIC, + state_cache.num_hdiff_buffers() as i64, + ); + metrics::set_gauge_vec( + &metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_BYTE_SIZE, + HOT_METRIC, + state_cache.hdiff_buffer_mem_usage() as i64, + ); + drop(state_cache); metrics::set_gauge( &metrics::STORE_BEACON_HISTORIC_STATE_CACHE_SIZE, hsc_metrics.num_state as i64, ); - metrics::set_gauge( + metrics::set_gauge_vec( &metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_SIZE, + COLD_METRIC, hsc_metrics.num_hdiff as i64, ); - metrics::set_gauge( + metrics::set_gauge_vec( &metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_BYTE_SIZE, + COLD_METRIC, hsc_metrics.hdiff_byte_size as i64, ); @@ -887,14 +955,6 @@ 
impl, Cold: ItemStore> HotColdDB } } - pub fn put_state_summary( - &self, - state_root: &Hash256, - summary: HotStateSummary, - ) -> Result<(), Error> { - self.hot_db.put(state_root, &summary) - } - /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { let mut ops: Vec = Vec::new(); @@ -986,7 +1046,14 @@ impl, Cold: ItemStore> HotColdDB }; // It's a bit redundant but we elect to cache the state here and down below. let mut opt_state = self - .load_hot_state(&state_root, true)? + .load_hot_state(&state_root, true) + .map_err(|e| { + Error::LoadingHotStateError( + format!("get advanced {block_root} {max_slot}"), + state_root, + e.into(), + ) + })? .map(|(state, _block_root)| (state_root, state)); if let Some((state_root, state)) = opt_state.as_mut() { @@ -1058,7 +1125,7 @@ impl, Cold: ItemStore> HotColdDB start_slot: Slot, end_slot: Slot, get_state: impl FnOnce() -> Result<(BeaconState, Hash256), Error>, - ) -> Result, Error> { + ) -> Result, Error> { HybridForwardsBlockRootsIterator::new( self, DBColumn::BeaconBlockRoots, @@ -1088,7 +1155,7 @@ impl, Cold: ItemStore> HotColdDB start_slot: Slot, end_slot: Slot, get_state: impl FnOnce() -> Result<(BeaconState, Hash256), Error>, - ) -> Result, Error> { + ) -> Result, Error> { HybridForwardsStateRootsIterator::new( self, DBColumn::BeaconStateRoots, @@ -1098,41 +1165,6 @@ impl, Cold: ItemStore> HotColdDB ) } - /// Load an epoch boundary state by using the hot state summary look-up. - /// - /// Will fall back to the cold DB if a hot state summary is not found. - /// - /// NOTE: only used in tests at the moment - pub fn load_epoch_boundary_state( - &self, - state_root: &Hash256, - ) -> Result>, Error> { - if let Some(HotStateSummary { - epoch_boundary_state_root, - .. - }) = self.load_hot_state_summary(state_root)? 
- { - // NOTE: minor inefficiency here because we load an unnecessary hot state summary - let (state, _) = self - .load_hot_state(&epoch_boundary_state_root, true)? - .ok_or(HotColdDBError::MissingEpochBoundaryState( - epoch_boundary_state_root, - *state_root, - ))?; - Ok(Some(state)) - } else { - // Try the cold DB - match self.load_cold_state_slot(state_root)? { - Some(state_slot) => { - let epoch_boundary_slot = - state_slot / E::slots_per_epoch() * E::slots_per_epoch(); - self.load_cold_state_by_slot(epoch_boundary_slot).map(Some) - } - None => Ok(None), - } - } - } - pub fn put_item(&self, key: &Hash256, item: &I) -> Result<(), Error> { self.hot_db.put(key, item) } @@ -1206,13 +1238,39 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteState(state_root, slot) => { // Delete the hot state summary. key_value_batch.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateSummary, + DBColumn::BeaconStateHotSummary, state_root.as_slice().to_vec(), )); - if slot.is_none_or(|slot| slot % E::slots_per_epoch() == 0) { + if let Some(slot) = slot { + match self.hot_storage_strategy(slot)? { + StorageStrategy::Snapshot => { + // Full state stored in this position + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateHotSnapshot, + state_root.as_slice().to_vec(), + )); + } + StorageStrategy::DiffFrom(_) => { + // Diff stored in this position + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateHotDiff, + state_root.as_slice().to_vec(), + )); + } + StorageStrategy::ReplayFrom(_) => { + // Nothing else to delete + } + } + } else { + // NOTE(hdiff): Attempt to delete both snapshots and diffs if we don't know + // the slot. 
key_value_batch.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconState, + DBColumn::BeaconStateHotSnapshot, + state_root.as_slice().to_vec(), + )); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateHotDiff, state_root.as_slice().to_vec(), )); } @@ -1420,9 +1478,6 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { - // Avoid storing states in the database if they already exist in the state cache. - // The exception to this is the finalized state, which must exist in the cache before it - // is stored on disk. match self.state_cache.lock().put_state( *state_root, state.get_latest_block_root(*state_root), @@ -1443,28 +1498,127 @@ impl, Cold: ItemStore> HotColdDB state_slot = %state.slot(), "State already exists in state cache", ); - return Ok(()); + // NOTE: We used to return early here, but had some issues with states being + // in the cache but not on disk. Instead of relying on the cache we try loading + // the state summary below and rely on that instead. } - PutStateOutcome::Finalized => {} // Continue to store. + // Continue to store. + PutStateOutcome::Finalized | PutStateOutcome::PreFinalizedHDiffBuffer => {} } - // On the epoch boundary, store the full state. - if state.slot() % E::slots_per_epoch() == 0 { + // Computing diffs is expensive so we avoid it if we already have this state stored on + // disk. 
+ if self.load_hot_state_summary(state_root)?.is_some() { debug!( slot = %state.slot(), ?state_root, - "Storing full state on epoch boundary" + "Skipping storage of state already in the DB" ); - store_full_state(state_root, state, ops)?; + return Ok(()); } + let summary = self.store_hot_state_summary(state_root, state, ops)?; + self.store_hot_state_diffs(state_root, state, ops)?; + + debug!( + ?state_root, + slot = %state.slot(), + storage_strategy = ?self.hot_storage_strategy(state.slot())?, + diff_base_state = %summary.diff_base_state, + previous_state_root = ?summary.previous_state_root, + "Storing hot state summary and diffs" + ); + + Ok(()) + } + + /// Store a post-finalization state efficiently in the hot database. + pub fn store_hot_state_summary( + &self, + state_root: &Hash256, + state: &BeaconState, + ops: &mut Vec, + ) -> Result { // Store a summary of the state. // We store one even for the epoch boundary states, as we may need their slots // when doing a look up by state root. 
- let hot_state_summary = HotStateSummary::new(state_root, state)?; - let op = hot_state_summary.as_kv_store_op(*state_root); - ops.push(op); + let hot_state_summary = HotStateSummary::new( + self, + *state_root, + state, + self.hot_storage_strategy(state.slot())?, + )?; + ops.push(hot_state_summary.as_kv_store_op(*state_root)); + Ok(hot_state_summary) + } + pub fn store_hot_state_diffs( + &self, + state_root: &Hash256, + state: &BeaconState, + ops: &mut Vec, + ) -> Result<(), Error> { + let slot = state.slot(); + let storage_strategy = self.hot_storage_strategy(slot)?; + match storage_strategy { + StorageStrategy::ReplayFrom(_) => { + // Already have persisted the state summary, don't persist anything else + } + StorageStrategy::Snapshot => { + self.store_hot_state_as_snapshot(state_root, state, ops)?; + } + StorageStrategy::DiffFrom(from_slot) => { + let from_root = get_ancestor_state_root(self, state, from_slot).map_err(|e| { + Error::StateSummaryIteratorError { + error: e, + from_state_root: *state_root, + from_state_slot: state.slot(), + target_slot: slot, + } + })?; + self.store_hot_state_as_diff(state_root, state, from_root, ops)?; + } + } + Ok(()) + } + + fn store_hot_state_as_diff( + &self, + state_root: &Hash256, + state: &BeaconState, + from_root: Hash256, + ops: &mut Vec, + ) -> Result<(), Error> { + let base_buffer = { + let _t = metrics::start_timer_vec( + &metrics::BEACON_HDIFF_BUFFER_LOAD_BEFORE_STORE_TIME, + HOT_METRIC, + ); + self.load_hot_hdiff_buffer(from_root).map_err(|e| { + Error::LoadingHotHdiffBufferError( + format!("store state as diff {state_root:?} {}", state.slot()), + from_root, + e.into(), + ) + })? + }; + let target_buffer = HDiffBuffer::from_state(state.clone()); + let diff = { + let _timer = metrics::start_timer_vec(&metrics::BEACON_HDIFF_COMPUTE_TIME, HOT_METRIC); + HDiff::compute(&base_buffer, &target_buffer, &self.config)? 
+ }; + let diff_bytes = diff.as_ssz_bytes(); + let layer = HierarchyConfig::exponent_for_slot(state.slot()); + metrics::observe_vec( + &metrics::BEACON_HDIFF_SIZES, + &[&layer.to_string()], + diff_bytes.len() as f64, + ); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconStateHotDiff, + state_root.as_slice().to_vec(), + diff_bytes, + )); Ok(()) } @@ -1483,7 +1637,9 @@ impl, Cold: ItemStore> HotColdDB warn!(?state_root, "State cache missed"); } - let state_from_disk = self.load_hot_state(state_root, update_cache)?; + let state_from_disk = self.load_hot_state(state_root, update_cache).map_err(|e| { + Error::LoadingHotStateError("get state".to_owned(), *state_root, e.into()) + })?; if let Some((mut state, block_root)) = state_from_disk { state.update_tree_hash_cache()?; @@ -1516,6 +1672,88 @@ impl, Cold: ItemStore> HotColdDB } } + fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result { + if let Some(buffer) = self + .state_cache + .lock() + .get_hdiff_buffer_by_state_root(state_root) + { + return Ok(buffer); + } + + let Some(HotStateSummary { + slot, + diff_base_state, + .. + }) = self.load_hot_state_summary(&state_root)? + else { + return Err(Error::MissingHotStateSummary(state_root)); + }; + + let buffer = match self.hot_storage_strategy(slot)? { + StorageStrategy::Snapshot => { + let Some(state) = self.load_hot_state_as_snapshot(state_root)? 
else { + let existing_snapshots = self.load_hot_state_snapshot_roots()?; + debug!( + requested = ?state_root, + existing_snapshots = ?existing_snapshots, + "Missing hot state snapshot" + ); + return Err(Error::MissingHotStateSnapshot(state_root, slot)); + }; + HDiffBuffer::from_state(state) + } + StorageStrategy::DiffFrom(from_slot) => { + let from_state_root = diff_base_state.get_root(from_slot)?; + let mut buffer = self.load_hot_hdiff_buffer(from_state_root).map_err(|e| { + Error::LoadingHotHdiffBufferError( + format!("load hdiff DiffFrom {from_slot} {state_root}"), + from_state_root, + e.into(), + ) + })?; + let diff = self.load_hot_hdiff(state_root)?; + { + let _timer = + metrics::start_timer_vec(&metrics::BEACON_HDIFF_APPLY_TIME, HOT_METRIC); + diff.apply(&mut buffer, &self.config)?; + } + buffer + } + StorageStrategy::ReplayFrom(from_slot) => { + let from_state_root = diff_base_state.get_root(from_slot)?; + self.load_hot_hdiff_buffer(from_state_root).map_err(|e| { + Error::LoadingHotHdiffBufferError( + format!("load hdiff ReplayFrom {from_slot} {state_root}"), + from_state_root, + e.into(), + ) + })? + } + }; + + // Add buffer to cache for future calls. + self.state_cache + .lock() + .put_hdiff_buffer(state_root, slot, &buffer); + + Ok(buffer) + } + + fn load_hot_hdiff(&self, state_root: Hash256) -> Result { + let bytes = { + let _t = metrics::start_timer_vec(&metrics::BEACON_HDIFF_READ_TIME, HOT_METRIC); + self.hot_db + .get_bytes(DBColumn::BeaconStateHotDiff, state_root.as_slice())? + .ok_or(HotColdDBError::MissingHotHDiff(state_root))? + }; + let hdiff = { + let _t = metrics::start_timer_vec(&metrics::BEACON_HDIFF_DECODE_TIME, HOT_METRIC); + HDiff::from_ssz_bytes(&bytes)? + }; + Ok(hdiff) + } + /// Load a post-finalization state from the hot database. /// /// Will replay blocks from the nearest epoch boundary. 
@@ -1532,64 +1770,64 @@ impl, Cold: ItemStore> HotColdDB if let Some(HotStateSummary { slot, latest_block_root, - epoch_boundary_state_root, + diff_base_state, + .. }) = self.load_hot_state_summary(state_root)? { - let mut boundary_state = - get_full_state(&self.hot_db, &epoch_boundary_state_root, &self.spec)?.ok_or( - HotColdDBError::MissingEpochBoundaryState( - epoch_boundary_state_root, - *state_root, - ), - )?; + let mut state = match self.hot_storage_strategy(slot)? { + strat @ StorageStrategy::Snapshot | strat @ StorageStrategy::DiffFrom(_) => { + let buffer_timer = metrics::start_timer_vec( + &metrics::BEACON_HDIFF_BUFFER_LOAD_TIME, + HOT_METRIC, + ); + let buffer = self.load_hot_hdiff_buffer(*state_root).map_err(|e| { + Error::LoadingHotHdiffBufferError( + format!("load state {strat:?} {slot}"), + *state_root, + e.into(), + ) + })?; + drop(buffer_timer); + let mut state = buffer.as_state(&self.spec)?; - // Immediately rebase the state from disk on the finalized state so that we can reuse - // parts of the tree for state root calculation in `replay_blocks`. - self.state_cache - .lock() - .rebase_on_finalized(&mut boundary_state, &self.spec)?; + // Immediately rebase the state from diffs on the finalized state so that we + // can utilise structural sharing and don't consume excess memory. + self.state_cache + .lock() + .rebase_on_finalized(&mut state, &self.spec)?; - // Optimization to avoid even *thinking* about replaying blocks if we're already - // on an epoch boundary. - let mut state = if slot % E::slots_per_epoch() == 0 { - boundary_state - } else { - // If replaying blocks, and `update_cache` is true, also cache the epoch boundary - // state that this state is based on. It may be useful as the basis of more states - // in the same epoch. - let state_cache_hook = |state_root, state: &mut BeaconState| { - if !update_cache || state.slot() % E::slots_per_epoch() != 0 { - return Ok(()); - } - // Ensure all caches are built before attempting to cache. 
- state.update_tree_hash_cache()?; - state.build_all_caches(&self.spec)?; + state + } + StorageStrategy::ReplayFrom(from_slot) => { + let from_state_root = diff_base_state.get_root(from_slot)?; - let latest_block_root = state.get_latest_block_root(state_root); - if let PutStateOutcome::New(_) = - self.state_cache - .lock() - .put_state(state_root, latest_block_root, state)? - { - debug!( - ?state_root, - state_slot = %state.slot(), - descendant_slot = %slot, - "Cached ancestor state", - ); - } - Ok(()) - }; - let blocks = - self.load_blocks_to_replay(boundary_state.slot(), slot, latest_block_root)?; - let _t = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_HOT_BLOCKS_TIME); - self.replay_blocks( - boundary_state, - blocks, - slot, - no_state_root_iter(), - Some(Box::new(state_cache_hook)), - )? + let (mut base_state, _) = self + .load_hot_state(&from_state_root, update_cache) + .map_err(|e| { + Error::LoadingHotStateError( + format!("load state ReplayFrom {from_slot}"), + *state_root, + e.into(), + ) + })? + .ok_or(HotColdDBError::MissingHotState { + state_root: from_state_root, + requested_by_state_summary: (*state_root, slot), + })?; + + // Immediately rebase the state from disk on the finalized state so that we can + // reuse parts of the tree for state root calculation in `replay_blocks`. + self.state_cache + .lock() + .rebase_on_finalized(&mut base_state, &self.spec)?; + + self.load_hot_state_using_replay( + base_state, + slot, + latest_block_root, + update_cache, + )? 
+ } }; state.apply_pending_mutations()?; @@ -1599,6 +1837,56 @@ impl, Cold: ItemStore> HotColdDB } } + pub fn load_hot_state_using_replay( + &self, + base_state: BeaconState, + slot: Slot, + latest_block_root: Hash256, + update_cache: bool, + ) -> Result, Error> { + if base_state.slot() == slot { + return Ok(base_state); + } + + let blocks = self.load_blocks_to_replay(base_state.slot(), slot, latest_block_root)?; + let _t = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_HOT_BLOCKS_TIME); + + // If replaying blocks, and `update_cache` is true, also cache the epoch boundary + // state that this state is based on. It may be useful as the basis of more states + // in the same epoch. + let state_cache_hook = |state_root, state: &mut BeaconState| { + if !update_cache || state.slot() % E::slots_per_epoch() != 0 { + return Ok(()); + } + // Ensure all caches are built before attempting to cache. + state.update_tree_hash_cache()?; + state.build_all_caches(&self.spec)?; + + let latest_block_root = state.get_latest_block_root(state_root); + if let PutStateOutcome::New(_) = + self.state_cache + .lock() + .put_state(state_root, latest_block_root, state)? + { + debug!( + ?state_root, + state_slot = %state.slot(), + descendant_slot = %slot, + "Cached ancestor state", + ); + } + Ok(()) + }; + + self.replay_blocks( + base_state, + blocks, + slot, + no_state_root_iter(), + Some(Box::new(state_cache_hook)), + ) + } + pub fn store_cold_state_summary( &self, state_root: &Hash256, @@ -1624,7 +1912,7 @@ impl, Cold: ItemStore> HotColdDB self.store_cold_state_summary(state_root, state.slot(), ops)?; let slot = state.slot(); - match self.hierarchy.storage_strategy(slot)? { + match self.cold_storage_strategy(slot)? 
{ StorageStrategy::ReplayFrom(from) => { debug!( strategy = "replay", @@ -1699,6 +1987,54 @@ impl, Cold: ItemStore> HotColdDB } } + pub fn store_hot_state_as_snapshot( + &self, + state_root: &Hash256, + state: &BeaconState, + ops: &mut Vec, + ) -> Result<(), Error> { + let bytes = state.as_ssz_bytes(); + let compressed_value = { + let _timer = metrics::start_timer(&metrics::STORE_BEACON_STATE_FREEZER_COMPRESS_TIME); + let mut out = Vec::with_capacity(self.config.estimate_compressed_size(bytes.len())); + let mut encoder = Encoder::new(&mut out, self.config.compression_level) + .map_err(Error::Compression)?; + encoder.write_all(&bytes).map_err(Error::Compression)?; + encoder.finish().map_err(Error::Compression)?; + out + }; + + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconStateHotSnapshot, + state_root.as_slice().to_vec(), + compressed_value, + )); + Ok(()) + } + + fn load_hot_state_bytes_as_snapshot( + &self, + state_root: Hash256, + ) -> Result>, Error> { + match self + .hot_db + .get_bytes(DBColumn::BeaconStateHotSnapshot, state_root.as_slice())? + { + Some(bytes) => { + let _timer = + metrics::start_timer(&metrics::STORE_BEACON_STATE_FREEZER_DECOMPRESS_TIME); + let mut ssz_bytes = + Vec::with_capacity(self.config.estimate_decompressed_size(bytes.len())); + let mut decoder = Decoder::new(&*bytes).map_err(Error::Compression)?; + decoder + .read_to_end(&mut ssz_bytes) + .map_err(Error::Compression)?; + Ok(Some(ssz_bytes)) + } + None => Ok(None), + } + } + fn load_cold_state_as_snapshot(&self, slot: Slot) -> Result>, Error> { Ok(self .load_cold_state_bytes_as_snapshot(slot)? @@ -1706,6 +2042,22 @@ impl, Cold: ItemStore> HotColdDB .transpose()?) } + fn load_hot_state_as_snapshot( + &self, + state_root: Hash256, + ) -> Result>, Error> { + Ok(self + .load_hot_state_bytes_as_snapshot(state_root)? + .map(|bytes| BeaconState::from_ssz_bytes(&bytes, &self.spec)) + .transpose()?) 
+ } + + fn load_hot_state_snapshot_roots(&self) -> Result, Error> { + self.hot_db + .iter_column_keys::(DBColumn::BeaconStateHotSnapshot) + .collect() + } + pub fn store_cold_state_as_diff( &self, state: &BeaconState, @@ -1714,15 +2066,24 @@ impl, Cold: ItemStore> HotColdDB ) -> Result<(), Error> { // Load diff base state bytes. let (_, base_buffer) = { - let _t = metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_LOAD_FOR_STORE_TIME); + let _t = metrics::start_timer_vec( + &metrics::BEACON_HDIFF_BUFFER_LOAD_BEFORE_STORE_TIME, + COLD_METRIC, + ); self.load_hdiff_buffer_for_slot(from_slot)? }; let target_buffer = HDiffBuffer::from_state(state.clone()); let diff = { - let _timer = metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_COMPUTE_TIME); + let _timer = metrics::start_timer_vec(&metrics::BEACON_HDIFF_COMPUTE_TIME, COLD_METRIC); HDiff::compute(&base_buffer, &target_buffer, &self.config)? }; let diff_bytes = diff.as_ssz_bytes(); + let layer = HierarchyConfig::exponent_for_slot(state.slot()); + metrics::observe_vec( + &metrics::BEACON_HDIFF_SIZES, + &[&layer.to_string()], + diff_bytes.len() as f64, + ); ops.push(KeyValueStoreOp::PutKeyValue( DBColumn::BeaconStateDiff, @@ -1746,7 +2107,7 @@ impl, Cold: ItemStore> HotColdDB /// /// Will reconstruct the state if it lies between restore points. pub fn load_cold_state_by_slot(&self, slot: Slot) -> Result, Error> { - let storage_strategy = self.hierarchy.storage_strategy(slot)?; + let storage_strategy = self.cold_storage_strategy(slot)?; // Search for a state from this slot or a recent prior slot in the historic state cache. let mut historic_state_cache = self.historic_state_cache.lock(); @@ -1775,10 +2136,10 @@ impl, Cold: ItemStore> HotColdDB // Load using the diff hierarchy. For states that require replay we recurse into this // function so that we can try to get their pre-state *as a state* rather than an hdiff // buffer. - match self.hierarchy.storage_strategy(slot)? 
{ + match self.cold_storage_strategy(slot)? { StorageStrategy::Snapshot | StorageStrategy::DiffFrom(_) => { let buffer_timer = - metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_LOAD_TIME); + metrics::start_timer_vec(&metrics::BEACON_HDIFF_BUFFER_LOAD_TIME, COLD_METRIC); let (_, buffer) = self.load_hdiff_buffer_for_slot(slot)?; drop(buffer_timer); let state = buffer.as_state(&self.spec)?; @@ -1847,13 +2208,13 @@ impl, Cold: ItemStore> HotColdDB fn load_hdiff_for_slot(&self, slot: Slot) -> Result { let bytes = { - let _t = metrics::start_timer(&metrics::BEACON_HDIFF_READ_TIMES); + let _t = metrics::start_timer_vec(&metrics::BEACON_HDIFF_READ_TIME, COLD_METRIC); self.cold_db .get_bytes(DBColumn::BeaconStateDiff, &slot.as_u64().to_be_bytes())? .ok_or(HotColdDBError::MissingHDiff(slot))? }; let hdiff = { - let _t = metrics::start_timer(&metrics::BEACON_HDIFF_DECODE_TIMES); + let _t = metrics::start_timer_vec(&metrics::BEACON_HDIFF_DECODE_TIME, COLD_METRIC); HDiff::from_ssz_bytes(&bytes)? }; Ok(hdiff) @@ -1867,15 +2228,15 @@ impl, Cold: ItemStore> HotColdDB %slot, "Hit hdiff buffer cache" ); - metrics::inc_counter(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_HIT); + metrics::inc_counter_vec(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_HIT, COLD_METRIC); return Ok((slot, buffer)); } - metrics::inc_counter(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_MISS); + metrics::inc_counter_vec(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_MISS, COLD_METRIC); // Load buffer for the previous state. // This amount of recursion (<10 levels) should be OK. let t = std::time::Instant::now(); - match self.hierarchy.storage_strategy(slot)? { + match self.cold_storage_strategy(slot)? { // Base case. 
StorageStrategy::Snapshot => { let state = self @@ -1904,7 +2265,7 @@ impl, Cold: ItemStore> HotColdDB let diff = self.load_hdiff_for_slot(slot)?; { let _timer = - metrics::start_timer(&metrics::STORE_BEACON_HDIFF_BUFFER_APPLY_TIME); + metrics::start_timer_vec(&metrics::BEACON_HDIFF_APPLY_TIME, COLD_METRIC); diff.apply(&mut buffer, &self.config)?; } @@ -2176,11 +2537,11 @@ impl, Cold: ItemStore> HotColdDB /// Initialise the anchor info for checkpoint sync starting from `block`. pub fn init_anchor_info( &self, - block: BeaconBlockRef<'_, E>, + oldest_block_parent: Hash256, + oldest_block_slot: Slot, + anchor_slot: Slot, retain_historic_states: bool, ) -> Result { - let anchor_slot = block.slot(); - // Set the `state_upper_limit` to the slot of the *next* checkpoint. let next_snapshot_slot = self.hierarchy.next_snapshot_slot(anchor_slot)?; let state_upper_limit = if !retain_historic_states { @@ -2188,17 +2549,12 @@ impl, Cold: ItemStore> HotColdDB } else { next_snapshot_slot }; - let anchor_info = if state_upper_limit == 0 && anchor_slot == 0 { - // Genesis archive node: no anchor because we *will* store all states. - ANCHOR_FOR_ARCHIVE_NODE - } else { - AnchorInfo { - anchor_slot, - oldest_block_slot: anchor_slot, - oldest_block_parent: block.parent_root(), - state_upper_limit, - state_lower_limit: self.spec.genesis_slot, - } + let anchor_info = AnchorInfo { + anchor_slot, + oldest_block_slot, + oldest_block_parent, + state_upper_limit, + state_lower_limit: self.spec.genesis_slot, }; self.compare_and_set_anchor_info(ANCHOR_UNINITIALIZED, anchor_info) } @@ -2245,7 +2601,8 @@ impl, Cold: ItemStore> HotColdDB /// Load the anchor info from disk. fn load_anchor_info(hot_db: &Hot) -> Result { Ok(hot_db - .get(&ANCHOR_INFO_KEY)? + .get(&ANCHOR_INFO_KEY) + .map_err(|e| Error::LoadAnchorInfo(e.into()))? .unwrap_or(ANCHOR_UNINITIALIZED)) } @@ -2328,7 +2685,9 @@ impl, Cold: ItemStore> HotColdDB /// Load the blob info from disk, but do not set `self.blob_info`. 
fn load_blob_info(&self) -> Result, Error> { - self.hot_db.get(&BLOB_INFO_KEY) + self.hot_db + .get(&BLOB_INFO_KEY) + .map_err(|e| Error::LoadBlobInfo(e.into())) } /// Store the given `blob_info` to disk. @@ -2373,7 +2732,9 @@ impl, Cold: ItemStore> HotColdDB /// Load the blob info from disk, but do not set `self.data_column_info`. fn load_data_column_info(&self) -> Result, Error> { - self.hot_db.get(&DATA_COLUMN_INFO_KEY) + self.hot_db + .get(&DATA_COLUMN_INFO_KEY) + .map_err(|e| Error::LoadDataColumnInfo(e.into())) } /// Store the given `data_column_info` to disk. @@ -2432,7 +2793,9 @@ impl, Cold: ItemStore> HotColdDB /// Load previously-stored config from disk. fn load_config(&self) -> Result, Error> { - self.hot_db.get(&CONFIG_KEY) + self.hot_db + .get(&CONFIG_KEY) + .map_err(|e| Error::LoadConfig(e.into())) } /// Write the config to disk. @@ -2442,18 +2805,24 @@ impl, Cold: ItemStore> HotColdDB /// Load the split point from disk, sans block root. fn load_split_partial(&self) -> Result, Error> { - self.hot_db.get(&SPLIT_KEY) + self.hot_db + .get(&SPLIT_KEY) + .map_err(|e| Error::LoadSplit(e.into())) } /// Load the split point from disk, including block root. fn load_split(&self) -> Result, Error> { match self.load_split_partial()? { Some(mut split) => { + debug!(?split, "Loaded split partial"); // Load the hot state summary to get the block root. 
- let summary = self.load_hot_state_summary(&split.state_root)?.ok_or( - HotColdDBError::MissingSplitState(split.state_root, split.slot), - )?; - split.block_root = summary.latest_block_root; + let latest_block_root = self + .load_block_root_from_summary_any_version(&split.state_root) + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + ))?; + split.block_root = latest_block_root; Ok(Some(split)) } None => Ok(None), @@ -2478,13 +2847,41 @@ impl, Cold: ItemStore> HotColdDB &self, state_root: &Hash256, ) -> Result, Error> { - self.hot_db.get(state_root) + self.hot_db + .get(state_root) + .map_err(|e| Error::LoadHotStateSummary(*state_root, e.into())) + } + + /// Load a hot state's summary in V22 format, given its root. + pub fn load_hot_state_summary_v22( + &self, + state_root: &Hash256, + ) -> Result, Error> { + self.hot_db + .get(state_root) + .map_err(|e| Error::LoadHotStateSummary(*state_root, e.into())) + } + + /// Load the latest block root for a hot state summary either in modern form, or V22 form. + /// + /// This function is required to open a V22 database for migration to V24, or vice versa. + pub fn load_block_root_from_summary_any_version( + &self, + state_root: &Hash256, + ) -> Option { + if let Ok(Some(summary)) = self.load_hot_state_summary(state_root) { + return Some(summary.latest_block_root); + } + if let Ok(Some(summary)) = self.load_hot_state_summary_v22(state_root) { + return Some(summary.latest_block_root); + } + None } /// Load all hot state summaries present in the hot DB pub fn load_hot_state_summaries(&self) -> Result, Error> { self.hot_db - .iter_column::(DBColumn::BeaconStateSummary) + .iter_column::(DBColumn::BeaconStateHotSummary) .map(|res| { let (state_root, value) = res?; let summary = HotStateSummary::from_ssz_bytes(&value)?; @@ -2501,27 +2898,13 @@ impl, Cold: ItemStore> HotColdDB /// Run a compaction pass on the freezer DB to free up space used by deleted states. 
pub fn compact_freezer(&self) -> Result<(), Error> { - let current_schema_columns = vec![ + let columns = vec![ DBColumn::BeaconColdStateSummary, DBColumn::BeaconStateSnapshot, DBColumn::BeaconStateDiff, DBColumn::BeaconStateRoots, ]; - // We can remove this once schema V21 has been gone for a while. - let previous_schema_columns = vec![ - DBColumn::BeaconState, - DBColumn::BeaconStateSummary, - DBColumn::BeaconBlockRootsChunked, - DBColumn::BeaconStateRootsChunked, - DBColumn::BeaconRestorePoint, - DBColumn::BeaconHistoricalRoots, - DBColumn::BeaconRandaoMixes, - DBColumn::BeaconHistoricalSummaries, - ]; - let mut columns = current_schema_columns; - columns.extend(previous_schema_columns); - for column in columns { info!(?column, "Starting compaction"); self.cold_db.compact_column(column)?; @@ -2535,25 +2918,6 @@ impl, Cold: ItemStore> HotColdDB self.config.compact_on_prune } - /// Load the checkpoint to begin pruning from (the "old finalized checkpoint"). - pub fn load_pruning_checkpoint(&self) -> Result, Error> { - Ok(self - .hot_db - .get(&PRUNING_CHECKPOINT_KEY)? - .map(|pc: PruningCheckpoint| pc.checkpoint)) - } - - /// Store the checkpoint to begin pruning from (the "old finalized checkpoint"). - pub fn store_pruning_checkpoint(&self, checkpoint: Checkpoint) -> Result<(), Error> { - self.hot_db - .do_atomically(vec![self.pruning_checkpoint_store_op(checkpoint)]) - } - - /// Create a staged store for the pruning checkpoint. - pub fn pruning_checkpoint_store_op(&self, checkpoint: Checkpoint) -> KeyValueStoreOp { - PruningCheckpoint { checkpoint }.as_kv_store_op(PRUNING_CHECKPOINT_KEY) - } - /// Load the timestamp of the last compaction as a `Duration` since the UNIX epoch. pub fn load_compaction_timestamp(&self) -> Result, Error> { Ok(self @@ -2590,6 +2954,30 @@ impl, Cold: ItemStore> HotColdDB Ok(ops) } + /// Return a single block root from the cold DB. + /// + /// If the slot is unavailable due to partial block history, `Ok(None)` will be returned. 
+ pub fn get_cold_block_root(&self, slot: Slot) -> Result, Error> { + Ok(self + .cold_db + .get_bytes(DBColumn::BeaconBlockRoots, &slot.as_u64().to_be_bytes())? + .map(|bytes| Hash256::from_ssz_bytes(&bytes)) + .transpose()?) + } + + /// Return a single state root from the cold DB. + /// + /// If the slot is unavailable due to partial state history, `Ok(None)` will be returned. + /// + /// This function will usually only work on an archive node. + pub fn get_cold_state_root(&self, slot: Slot) -> Result, Error> { + Ok(self + .cold_db + .get_bytes(DBColumn::BeaconStateRoots, &slot.as_u64().to_be_bytes())? + .map(|bytes| Hash256::from_ssz_bytes(&bytes)) + .transpose()?) + } + /// Try to prune all execution payloads, returning early if there is no need to prune. pub fn try_prune_execution_payloads(&self, force: bool) -> Result<(), Error> { let split = self.get_split_info(); @@ -2871,32 +3259,13 @@ impl, Cold: ItemStore> HotColdDB // migrating to the tree-states schema (delete everything in the freezer then start afresh). let mut cold_ops = vec![]; - let current_schema_columns = vec![ + let columns = vec![ DBColumn::BeaconColdStateSummary, DBColumn::BeaconStateSnapshot, DBColumn::BeaconStateDiff, DBColumn::BeaconStateRoots, ]; - // This function is intended to be able to clean up leftover V21 freezer database stuff in - // the case where the V22 schema upgrade failed *after* commiting the version increment but - // *before* cleaning up the freezer DB. - // - // We can remove this once schema V21 has been gone for a while. 
- let previous_schema_columns = vec![ - DBColumn::BeaconState, - DBColumn::BeaconStateSummary, - DBColumn::BeaconBlockRootsChunked, - DBColumn::BeaconStateRootsChunked, - DBColumn::BeaconRestorePoint, - DBColumn::BeaconHistoricalRoots, - DBColumn::BeaconRandaoMixes, - DBColumn::BeaconHistoricalSummaries, - ]; - - let mut columns = current_schema_columns; - columns.extend(previous_schema_columns); - for column in columns { for res in self.cold_db.iter_column_keys::>(column) { let key = res?; @@ -2934,7 +3303,7 @@ pub fn migrate_database, Cold: ItemStore>( finalized_state_root: Hash256, finalized_block_root: Hash256, finalized_state: &BeaconState, -) -> Result<(), Error> { +) -> Result { debug!( slot = %finalized_state.slot(), "Freezer migration started" @@ -2943,12 +3312,12 @@ pub fn migrate_database, Cold: ItemStore>( // 0. Check that the migration is sensible. // The new finalized state must increase the current split slot, and lie on an epoch // boundary (in order for the hot state summary scheme to work). - let current_split_slot = store.split.read_recursive().slot; + let current_split = *store.split.read_recursive(); let anchor_info = store.anchor_info.read_recursive().clone(); - if finalized_state.slot() < current_split_slot { + if finalized_state.slot() < current_split.slot { return Err(HotColdDBError::FreezeSlotError { - current_split_slot, + current_split_slot: current_split.slot, proposed_split_slot: finalized_state.slot(), } .into()); @@ -2965,7 +3334,7 @@ pub fn migrate_database, Cold: ItemStore>( // Iterate in descending order until the current split slot let state_roots: Vec<_> = process_results(RootsIterator::new(&store, finalized_state), |iter| { - iter.take_while(|(_, _, slot)| *slot >= current_split_slot) + iter.take_while(|(_, _, slot)| *slot >= current_split.slot) .collect() })?; @@ -2990,7 +3359,7 @@ pub fn migrate_database, Cold: ItemStore>( // Only store the cold state if it's on a diff boundary. 
// Calling `store_cold_state_summary` instead of `store_cold_state` for those allows us // to skip loading many hot states. - if let StorageStrategy::ReplayFrom(from) = store.hierarchy.storage_strategy(slot)? { + if let StorageStrategy::ReplayFrom(from) = store.cold_storage_strategy(slot)? { // Store slot -> state_root and state_root -> slot mappings. debug!( strategy = "replay", @@ -3024,40 +3393,41 @@ pub fn migrate_database, Cold: ItemStore>( // in the worst case we will restart with the old split and re-run the migration. store.cold_db.do_atomically(cold_db_block_ops)?; store.cold_db.sync()?; - { + let new_split = { let mut split_guard = store.split.write(); - let latest_split_slot = split_guard.slot; + let latest_split = *split_guard; // Detect a situation where the split point is (erroneously) changed from more than one // place in code. - if latest_split_slot != current_split_slot { + if latest_split.slot != current_split.slot { error!( - previous_split_slot = %current_split_slot, - current_split_slot = %latest_split_slot, + previous_split_slot = %current_split.slot, + current_split_slot = %latest_split.slot, "Race condition detected: Split point changed while copying states to the freezer" ); // Assume the freezing procedure will be retried in case this happens. return Err(Error::SplitPointModified( - current_split_slot, - latest_split_slot, + current_split.slot, + latest_split.slot, )); } // Before updating the in-memory split value, we flush it to disk first, so that should the // OS process die at this point, we pick up from the right place after a restart. - let split = Split { + let new_split = Split { slot: finalized_state.slot(), state_root: finalized_state_root, block_root: finalized_block_root, }; - store.hot_db.put_sync(&SPLIT_KEY, &split)?; + store.hot_db.put_sync(&SPLIT_KEY, &new_split)?; // Split point is now persisted in the hot database on disk. The in-memory split point // hasn't been modified elsewhere since we keep a write lock on it. 
It's safe to update // the in-memory split point now. - *split_guard = split; - } + *split_guard = new_split; + new_split + }; // Update the cache's view of the finalized state. store.update_finalized_state( @@ -3071,7 +3441,16 @@ pub fn migrate_database, Cold: ItemStore>( "Freezer migration complete" ); - Ok(()) + Ok(SplitChange { + previous: current_split, + new: new_split, + }) +} + +#[derive(Debug)] +pub struct SplitChange { + pub previous: Split, + pub new: Split, } /// Struct for storing the split slot and state root in the database. @@ -3108,19 +3487,221 @@ fn no_state_root_iter() -> Option), + LoadStateRootError(Box), + MissingStateRoot { + target_slot: Slot, + state_upper_limit: Slot, + }, + OutOfBoundsInitialSlot, +} + +/// Return the ancestor state root of a state beyond SlotsPerHistoricalRoot using the roots iterator +/// and the store +pub fn get_ancestor_state_root<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore>( + store: &'a HotColdDB, + from_state: &'a BeaconState, + target_slot: Slot, +) -> Result { + // Use the state itself for recent roots + if let Ok(target_state_root) = from_state.get_state_root(target_slot) { + return Ok(*target_state_root); + } + + // Fetch the anchor info prior to obtaining the split lock. We don't need to hold a lock because + // the `state_upper_limit` can't increase (and rug us) unless state pruning runs, and it never + // runs concurrently. + let state_upper_limit = store.get_anchor_info().state_upper_limit; + + // Hold the split lock so that state summaries are not pruned concurrently with this function + // running. + let split = store.split.read_recursive(); + + // If the state root is in range of the freezer DB's linear state root storage, fetch it + // directly from there. This is useful on archive nodes to avoid some of the complexity of + // traversing the sparse portion of the hdiff grid (prior to the split slot). 
It is also + // necessary for the v24 schema migration on archive nodes, where there isn't yet any grid + // to traverse. + if target_slot < split.slot && target_slot >= state_upper_limit { + drop(split); + return store + .get_cold_state_root(target_slot) + .map_err(Box::new) + .map_err(StateSummaryIteratorError::LoadStateRootError)? + .ok_or(StateSummaryIteratorError::MissingStateRoot { + target_slot, + state_upper_limit, + }); + } + + let mut state_root = { + // We can not start loading summaries from `state_root` since its summary has not yet been + // imported. This code path is called during block import. + // + // We need to choose a state_root to start that is + // - An ancestor of `from_state`, AND + // - Its state summary is already written (and not pruned) in the DB + // - Its slot is >= target_slot + // + // If we get to this codepath, (target_slot not in state's state_roots) it means that + // `state.slot()` is greater than `SlotsPerHistoricalRoot`, and `target_slot < state.slot() + // - SlotsPerHistoricalRoot`. + // + // Values we could start from: + // - `state.slot() - 1`: TODO if we don't immediately commit all each state to the DB + // individually, we may be attempting to read a state summary that is stored in a DB ops + // vector but not yet written to the DB. Also starting from this slot is wasteful as we + // know that the target slot is `< state.slot() - SlotsPerHistoricalRoot`. + // - `state.slot() - SlotsPerHistoricalRoot`: The most efficient slot to start. But we risk + // jumping to a state summary that has already been pruned. See the `max(.., split_slot)` + // below + let oldest_slot_in_state_roots = from_state + .slot() + .saturating_sub(Slot::new(E::SlotsPerHistoricalRoot::to_u64())); + + // Don't start with a slot that prior to the finalized state slot. We may be attempting to read + // a hot state summary that has already been pruned as part of the migration and error. 
HDiffs + // can reference diffs with a slot prior to the finalized checkpoint. But those are sparse so + // the probabiliy of hitting `MissingSummary` error is high. Instead, the summary for the + // finalized state is always available. + let start_slot = std::cmp::max(oldest_slot_in_state_roots, split.slot); + + *from_state + .get_state_root(start_slot) + .map_err(|_| StateSummaryIteratorError::OutOfBoundsInitialSlot)? + }; + + let mut previous_slot = None; + + loop { + let state_summary = store + .load_hot_state_summary(&state_root) + .map_err(|e| StateSummaryIteratorError::LoadSummaryError(Box::new(e)))? + .ok_or(StateSummaryIteratorError::MissingSummary(state_root))?; + + // Protect against infinite loops if the state summaries are not strictly descending + if let Some(previous_slot) = previous_slot { + if state_summary.slot >= previous_slot { + drop(split); + return Err(StateSummaryIteratorError::CircularSummaries { + state_root, + state_slot: state_summary.slot, + previous_slot, + }); + } + } + previous_slot = Some(state_summary.slot); + + match state_summary.slot.cmp(&target_slot) { + Ordering::Less => { + drop(split); + return Err(StateSummaryIteratorError::BelowTarget(state_summary.slot)); + } + Ordering::Equal => return Ok(state_root), + Ordering::Greater => {} // keep going + } + + // Jump to an older state summary that is an ancestor of `state_root` + if let OptionalDiffBaseState::BaseState(DiffBaseState { + slot, + state_root: diff_base_state_root, + }) = state_summary.diff_base_state + { + if target_slot <= slot { + // As an optimization use the HDiff state root to jump states faster + state_root = diff_base_state_root; + } + continue; + } + // Else jump slot by slot + state_root = state_summary.previous_state_root; + } +} + /// Struct for summarising a state in the hot database. /// /// Allows full reconstruction by replaying blocks. 
-#[derive(Debug, Clone, Copy, Default, Encode, Decode)] +#[derive(Debug, Clone, Copy, Encode, Decode)] pub struct HotStateSummary { pub slot: Slot, pub latest_block_root: Hash256, - epoch_boundary_state_root: Hash256, + pub latest_block_slot: Slot, + pub diff_base_state: OptionalDiffBaseState, + pub previous_state_root: Hash256, +} + +/// Information about the state that a hot state is diffed from or replays blocks from, if any. +/// +/// In the case of a snapshot, there is no diff base state, so this value will be +/// `DiffBaseState::Snapshot`. +#[derive(Debug, Clone, Copy, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +pub enum OptionalDiffBaseState { + // The SSZ crate requires *something* in each variant so we just store a u8 set to 0. + Snapshot(u8), + BaseState(DiffBaseState), +} + +#[derive(Debug, Clone, Copy, Encode, Decode)] +pub struct DiffBaseState { + slot: Slot, + state_root: Hash256, +} + +impl OptionalDiffBaseState { + pub fn new(slot: Slot, state_root: Hash256) -> Self { + Self::BaseState(DiffBaseState { slot, state_root }) + } + + pub fn get_root(&self, slot: Slot) -> Result { + match *self { + Self::Snapshot(_) => Err(Error::SnapshotDiffBaseState { slot }), + Self::BaseState(DiffBaseState { + slot: stored_slot, + state_root, + }) => { + if stored_slot == slot { + Ok(state_root) + } else { + Err(Error::MismatchedDiffBaseState { + expected_slot: slot, + stored_slot, + }) + } + } + } + } +} + +// Succint rendering of (slot, state_root) pair for "Storing hot state summary and diffs" log +impl std::fmt::Display for OptionalDiffBaseState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Snapshot(_) => write!(f, "snapshot"), + Self::BaseState(base_state) => write!(f, "{base_state}"), + } + } +} + +impl std::fmt::Display for DiffBaseState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{:?}", self.slot, self.state_root) + } } impl StoreItem for HotStateSummary { 
fn db_column() -> DBColumn { - DBColumn::BeaconStateSummary + DBColumn::BeaconStateHotSummary } fn as_store_bytes(&self) -> Vec { @@ -3134,27 +3715,78 @@ impl StoreItem for HotStateSummary { impl HotStateSummary { /// Construct a new summary of the given state. - pub fn new(state_root: &Hash256, state: &BeaconState) -> Result { + pub fn new, Cold: ItemStore>( + store: &HotColdDB, + state_root: Hash256, + state: &BeaconState, + storage_strategy: StorageStrategy, + ) -> Result { // Fill in the state root on the latest block header if necessary (this happens on all // slots where there isn't a skip). - let latest_block_root = state.get_latest_block_root(*state_root); - let epoch_boundary_slot = state.slot() / E::slots_per_epoch() * E::slots_per_epoch(); - let epoch_boundary_state_root = if epoch_boundary_slot == state.slot() { - *state_root + let latest_block_root = state.get_latest_block_root(state_root); + + let get_state_root = |slot| { + if slot == state.slot() { + Ok::<_, Error>(state_root) + } else { + Ok(get_ancestor_state_root(store, state, slot).map_err(|e| { + Error::StateSummaryIteratorError { + error: e, + from_state_root: state_root, + from_state_slot: state.slot(), + target_slot: slot, + } + })?) + } + }; + let diff_base_slot = storage_strategy.diff_base_slot(); + let diff_base_state = if let Some(diff_base_slot) = diff_base_slot { + OptionalDiffBaseState::new(diff_base_slot, get_state_root(diff_base_slot)?) } else { - *state - .get_state_root(epoch_boundary_slot) - .map_err(HotColdDBError::HotStateSummaryError)? + OptionalDiffBaseState::Snapshot(0) + }; + + let previous_state_root = if state.slot() == 0 { + // Set to 0x0 for genesis state to prevent any sort of circular reference. + Hash256::zero() + } else { + get_state_root(state.slot().safe_sub(1_u64)?)? 
}; Ok(HotStateSummary { slot: state.slot(), latest_block_root, - epoch_boundary_state_root, + latest_block_slot: state.latest_block_header().slot, + diff_base_state, + previous_state_root, }) } } +/// Legacy hot state summary used in schema V22 and before. +/// +/// This can be deleted when we remove V22 support. +#[derive(Debug, Clone, Copy, Encode, Decode)] +pub struct HotStateSummaryV22 { + pub slot: Slot, + pub latest_block_root: Hash256, + pub epoch_boundary_state_root: Hash256, +} + +impl StoreItem for HotStateSummaryV22 { + fn db_column() -> DBColumn { + DBColumn::BeaconStateSummary + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } +} + /// Struct for summarising a state in the freezer database. #[derive(Debug, Clone, Copy, Default, Encode, Decode)] pub(crate) struct ColdStateSummary { diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index 736585a72a..691c79ace7 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -1,2 +1 @@ -pub mod beacon_state; pub mod execution_payload; diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs deleted file mode 100644 index fd08e547f1..0000000000 --- a/beacon_node/store/src/impls/beacon_state.rs +++ /dev/null @@ -1,102 +0,0 @@ -use crate::*; -use ssz::{DecodeError, Encode}; -use ssz_derive::Encode; - -pub fn store_full_state( - state_root: &Hash256, - state: &BeaconState, - ops: &mut Vec, -) -> Result<(), Error> { - let bytes = { - let _overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_WRITE_OVERHEAD_TIMES); - StorageContainer::new(state).as_ssz_bytes() - }; - metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as u64); - metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); - ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconState, - state_root.as_slice().to_vec(), - 
bytes, - )); - Ok(()) -} - -pub fn get_full_state, E: EthSpec>( - db: &KV, - state_root: &Hash256, - spec: &ChainSpec, -) -> Result>, Error> { - let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); - - match db.get_bytes(DBColumn::BeaconState, state_root.as_slice())? { - Some(bytes) => { - let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES); - let container = StorageContainer::from_ssz_bytes(&bytes, spec)?; - - metrics::stop_timer(overhead_timer); - metrics::stop_timer(total_timer); - metrics::inc_counter(&metrics::BEACON_STATE_READ_COUNT); - metrics::inc_counter_by(&metrics::BEACON_STATE_READ_BYTES, bytes.len() as u64); - - Ok(Some(container.try_into()?)) - } - None => Ok(None), - } -} - -/// A container for storing `BeaconState` components. -// TODO: would be more space efficient with the caches stored separately and referenced by hash -#[derive(Encode)] -pub struct StorageContainer { - state: BeaconState, - committee_caches: Vec>, -} - -impl StorageContainer { - /// Create a new instance for storing a `BeaconState`. - pub fn new(state: &BeaconState) -> Self { - Self { - state: state.clone(), - committee_caches: state.committee_caches().to_vec(), - } - } - - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { - // We need to use the slot-switching `from_ssz_bytes` of `BeaconState`, which doesn't - // compose with the other SSZ utils, so we duplicate some parts of `ssz_derive` here. 
- let mut builder = ssz::SszDecoderBuilder::new(bytes); - - builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; - - let mut decoder = builder.build()?; - - let state = decoder.decode_next_with(|bytes| BeaconState::from_ssz_bytes(bytes, spec))?; - let committee_caches = decoder.decode_next()?; - - Ok(Self { - state, - committee_caches, - }) - } -} - -impl TryInto> for StorageContainer { - type Error = Error; - - fn try_into(mut self) -> Result, Error> { - let mut state = self.state; - - for i in (0..CACHED_EPOCHS).rev() { - if i >= self.committee_caches.len() { - return Err(Error::SszDecodeError(DecodeError::BytesInvalid( - "Insufficient committees for BeaconState".to_string(), - ))); - }; - - state.committee_caches_mut()[i] = self.committee_caches.remove(i); - } - - Ok(state) - } -} diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 8419dde4a2..47c5a1d9d8 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -384,9 +384,9 @@ fn slot_of_prev_restore_point(current_slot: Slot) -> Slot { #[cfg(test)] mod test { use super::*; - use crate::StoreConfig as Config; + use crate::{MemoryStore, StoreConfig as Config}; use beacon_chain::test_utils::BeaconChainHarness; - use beacon_chain::types::{ChainSpec, MainnetEthSpec}; + use beacon_chain::types::MainnetEthSpec; use std::sync::Arc; use types::FixedBytesExtended; @@ -400,10 +400,31 @@ mod test { harness.get_current_state() } + fn get_store() -> HotColdDB, MemoryStore> { + let store = + HotColdDB::open_ephemeral(Config::default(), Arc::new(E::default_spec())).unwrap(); + // Init achor info so anchor slot is set. 
Use a random block as it is only used for the + // parent_root + let _ = store + .init_anchor_info(Hash256::ZERO, Slot::new(0), Slot::new(0), false) + .unwrap(); + // Write a state with state root 0 which is the base `put_state` below tries to diff from + { + let harness = BeaconChainHarness::builder(E::default()) + .default_spec() + .deterministic_keypairs(1) + .fresh_ephemeral_store() + .build(); + let genesis_state = harness.get_current_state(); + store.put_state(&Hash256::ZERO, &genesis_state).unwrap(); + } + store + } + #[test] fn block_root_iter() { - let store = - HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal())).unwrap(); + let store = get_store::(); + let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); @@ -449,8 +470,8 @@ mod test { #[test] fn state_root_iter() { - let store = - HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal())).unwrap(); + let store = get_store::(); + let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 5b30971fd8..e996b47b72 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -35,10 +35,8 @@ pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; pub use self::memory_store::MemoryStore; pub use crate::metadata::BlobInfo; pub use errors::Error; -pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; pub use metrics::scrape_for_metrics; -use parking_lot::MutexGuard; use std::collections::HashSet; use std::sync::Arc; use strum::{EnumIter, EnumString, IntoStaticStr}; @@ -76,12 +74,6 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Execute either all of the operations in `batch` or none at all, returning an error. 
fn do_atomically(&self, batch: Vec) -> Result<(), Error>; - /// Return a mutex guard that can be used to synchronize sensitive transactions. - /// - /// This doesn't prevent other threads writing to the DB unless they also use - /// this method. In future we may implement a safer mandatory locking scheme. - fn begin_rw_transaction(&self) -> MutexGuard<()>; - /// Compact a single column in the database, freeing space used by deleted items. fn compact_column(&self, column: DBColumn) -> Result<(), Error>; @@ -91,7 +83,7 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { // i.e. entries being created and deleted. for column in [ DBColumn::BeaconState, - DBColumn::BeaconStateSummary, + DBColumn::BeaconStateHotSummary, DBColumn::BeaconBlock, ] { self.compact_column(column)?; @@ -100,17 +92,17 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { } /// Iterate through all keys and values in a particular column. - fn iter_column(&self, column: DBColumn) -> ColumnIter { + fn iter_column(&self, column: DBColumn) -> ColumnIter<'_, K> { self.iter_column_from(column, &vec![0; column.key_size()]) } /// Iterate through all keys and values in a column from a given starting point that fulfill the given predicate. - fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter; + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter<'_, K>; - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter; + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter<'_, K>; /// Iterate through all keys in a particular column. 
- fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter; + fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter<'_, K>; fn delete_batch(&self, column: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error>; @@ -130,7 +122,10 @@ impl Key for Hash256 { if key.len() == 32 { Ok(Hash256::from_slice(key)) } else { - Err(Error::InvalidKey) + Err(Error::InvalidKey(format!( + "Hash256 key unexpected len {}", + key.len() + ))) } } } @@ -162,7 +157,10 @@ pub fn get_data_column_key(block_root: &Hash256, column_index: &ColumnIndex) -> pub fn parse_data_column_key(data: Vec) -> Result<(Hash256, ColumnIndex), Error> { if data.len() != DBColumn::BeaconDataColumn.key_size() { - return Err(Error::InvalidKey); + return Err(Error::InvalidKey(format!( + "Unexpected BeaconDataColumn key len {}", + data.len() + ))); } // split_at panics if 32 < 40 which will never happen after the length check above let (block_root_bytes, column_index_bytes) = data.split_at(32); @@ -171,7 +169,7 @@ pub fn parse_data_column_key(data: Vec) -> Result<(Hash256, ColumnIndex), Er let column_index = ColumnIndex::from_le_bytes( column_index_bytes .try_into() - .map_err(|_| Error::InvalidKey)?, + .map_err(|e| Error::InvalidKey(format!("Invalid ColumnIndex {e:?}")))?, ); Ok((block_root, column_index)) } @@ -267,20 +265,40 @@ pub enum DBColumn { #[strum(serialize = "bdc")] BeaconDataColumn, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). + /// + /// DEPRECATED. #[strum(serialize = "ste")] BeaconState, + /// For compact `BeaconStateDiff`'s in the hot DB. + /// + /// hsd = Hot State Diff. + #[strum(serialize = "hsd")] + BeaconStateHotDiff, + /// For beacon state snapshots in the hot DB. + /// + /// hsn = Hot Snapshot. + #[strum(serialize = "hsn")] + BeaconStateHotSnapshot, /// For beacon state snapshots in the freezer DB. #[strum(serialize = "bsn")] BeaconStateSnapshot, /// For compact `BeaconStateDiff`s in the freezer DB. 
#[strum(serialize = "bsd")] BeaconStateDiff, - /// Mapping from state root to `HotStateSummary` in the hot DB. + /// DEPRECATED + /// + /// Mapping from state root to `HotStateSummaryV22` in the hot DB. /// /// Previously this column also served a role in the freezer DB, mapping state roots to /// `ColdStateSummary`. However that role is now filled by `BeaconColdStateSummary`. #[strum(serialize = "bss")] BeaconStateSummary, + /// Mapping from state root to `HotStateSummaryV23` in the hot DB. + /// + /// This column is populated after DB schema version 23 superseding `BeaconStateSummary`. The + /// new column is necessary to have a safe migration without data loss. + #[strum(serialize = "bs3")] + BeaconStateHotSummary, /// Mapping from state root to `ColdStateSummary` in the cold DB. #[strum(serialize = "bcs")] BeaconColdStateSummary, @@ -298,6 +316,7 @@ pub enum DBColumn { BeaconChain, #[strum(serialize = "opo")] OpPool, + /// DEPRECATED. #[strum(serialize = "etc")] Eth1Cache, #[strum(serialize = "frk")] @@ -339,6 +358,8 @@ pub enum DBColumn { BeaconRandaoMixes, #[strum(serialize = "dht")] DhtEnrs, + #[strum(serialize = "cus")] + CustodyContext, /// DEPRECATED. 
For Optimistically Imported Merge Transition Blocks #[strum(serialize = "otb")] OptimisticTransitionBlock, @@ -387,6 +408,9 @@ impl DBColumn { | Self::BeaconState | Self::BeaconBlob | Self::BeaconStateSummary + | Self::BeaconStateHotDiff + | Self::BeaconStateHotSnapshot + | Self::BeaconStateHotSummary | Self::BeaconColdStateSummary | Self::BeaconStateTemporary | Self::ExecPayload @@ -397,6 +421,7 @@ impl DBColumn { | Self::PubkeyCache | Self::BeaconRestorePoint | Self::DhtEnrs + | Self::CustodyContext | Self::OptimisticTransitionBlock => 32, Self::BeaconBlockRoots | Self::BeaconBlockRootsChunked diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 6070a2d3f0..a87d4f7f3f 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -2,7 +2,7 @@ use crate::{ errors::Error as DBError, get_key_for_col, hot_cold_store::BytesKey, ColumnIter, ColumnKeyIter, DBColumn, Error, ItemStore, Key, KeyValueStore, KeyValueStoreOp, }; -use parking_lot::{Mutex, MutexGuard, RwLock}; +use parking_lot::RwLock; use std::collections::{BTreeMap, HashSet}; use std::marker::PhantomData; use types::*; @@ -12,7 +12,6 @@ type DBMap = BTreeMap>; /// A thread-safe `BTreeMap` wrapper. pub struct MemoryStore { db: RwLock, - transaction_mutex: Mutex<()>, _phantom: PhantomData, } @@ -21,7 +20,6 @@ impl MemoryStore { pub fn open() -> Self { Self { db: RwLock::new(BTreeMap::new()), - transaction_mutex: Mutex::new(()), _phantom: PhantomData, } } @@ -82,7 +80,7 @@ impl KeyValueStore for MemoryStore { Ok(()) } - fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter<'_, K> { // We use this awkward pattern because we can't lock the `self.db` field *and* maintain a // reference to the lock guard across calls to `.next()`. This would be require a // struct with a field (the iterator) which references another field (the lock guard). 
@@ -103,19 +101,15 @@ impl KeyValueStore for MemoryStore { })) } - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter<'_, K> { Box::new(self.iter_column(column).map(|res| res.map(|(k, _)| k))) } - fn begin_rw_transaction(&self) -> MutexGuard<()> { - self.transaction_mutex.lock() - } - fn compact_column(&self, _column: DBColumn) -> Result<(), Error> { Ok(()) } - fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter { + fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter<'_, K> { // We use this awkward pattern because we can't lock the `self.db` field *and* maintain a // reference to the lock guard across calls to `.next()`. This would be require a // struct with a field (the iterator) which references another field (the lock guard). diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 55c64bf850..63cb4661cd 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -2,9 +2,9 @@ use crate::{DBColumn, Error, StoreItem}; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use types::{Checkpoint, Hash256, Slot}; +use types::{Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(23); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(25); // All the keys that get stored under the `BeaconMeta` column. 
// @@ -12,7 +12,8 @@ pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(23); pub const SCHEMA_VERSION_KEY: Hash256 = Hash256::repeat_byte(0); pub const CONFIG_KEY: Hash256 = Hash256::repeat_byte(1); pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2); -pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); +// DEPRECATED +// pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4); pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6); @@ -21,15 +22,6 @@ pub const DATA_COLUMN_INFO_KEY: Hash256 = Hash256::repeat_byte(7); /// State upper limit value used to indicate that a node is not storing historic states. pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); -/// The `AnchorInfo` encoding full availability of all historic blocks & states. -pub const ANCHOR_FOR_ARCHIVE_NODE: AnchorInfo = AnchorInfo { - anchor_slot: Slot::new(0), - oldest_block_slot: Slot::new(0), - oldest_block_parent: Hash256::ZERO, - state_upper_limit: Slot::new(0), - state_lower_limit: Slot::new(0), -}; - /// The `AnchorInfo` encoding an uninitialized anchor. /// /// This value should never exist except on initial start-up prior to the anchor being initialised @@ -65,30 +57,6 @@ impl StoreItem for SchemaVersion { } } -/// The checkpoint used for pruning the database. -/// -/// Updated whenever pruning is successful. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct PruningCheckpoint { - pub checkpoint: Checkpoint, -} - -impl StoreItem for PruningCheckpoint { - fn db_column() -> DBColumn { - DBColumn::BeaconMeta - } - - fn as_store_bytes(&self) -> Vec { - self.checkpoint.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(PruningCheckpoint { - checkpoint: Checkpoint::from_ssz_bytes(bytes)?, - }) - } -} - /// The last time the database was compacted. 
pub struct CompactionTimestamp(pub u64); @@ -111,7 +79,8 @@ impl StoreItem for CompactionTimestamp { pub struct AnchorInfo { /// The slot at which the anchor state is present and which we cannot revert. Values on start: /// - Genesis start: 0 - /// - Checkpoint sync: Slot of the finalized checkpoint block + /// - Checkpoint sync: Slot of the finalized state advanced to the checkpoint epoch + /// - Existing DB prior to v23: Finalized state slot at the migration moment /// /// Immutable pub anchor_slot: Slot, @@ -175,6 +144,21 @@ impl AnchorInfo { pub fn full_state_pruning_enabled(&self) -> bool { self.state_lower_limit == 0 && self.state_upper_limit == STATE_UPPER_LIMIT_NO_RETAIN } + + /// Compute the correct `AnchorInfo` for an archive node created from the current node. + /// + /// This method ensures that the `anchor_slot` which is used for the hot database's diff grid is + /// preserved. + pub fn as_archive_anchor(&self) -> Self { + Self { + // Anchor slot MUST be the same. It is immutable. + anchor_slot: self.anchor_slot, + oldest_block_slot: Slot::new(0), + oldest_block_parent: Hash256::ZERO, + state_upper_limit: Slot::new(0), + state_lower_limit: Slot::new(0), + } + } } impl StoreItem for AnchorInfo { diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 5da73c3cad..44b61e1ebe 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -4,6 +4,10 @@ use directory::size_of_dir; use std::path::Path; use std::sync::LazyLock; +// Labels used for histogram timer vecs that are tracked per DB (hot and cold). 
+pub const HOT_METRIC: &[&str] = &["hot"]; +pub const COLD_METRIC: &[&str] = &["cold"]; + /* * General */ @@ -142,66 +146,61 @@ pub static BEACON_STATE_HOT_GET_COUNT: LazyLock> = LazyLock:: "Total number of hot beacon states requested from the store (cache or DB)", ) }); -pub static BEACON_STATE_READ_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "store_beacon_state_read_seconds", - "Total time required to read a BeaconState from the database", - ) -}); -pub static BEACON_STATE_READ_OVERHEAD_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "store_beacon_state_read_overhead_seconds", - "Overhead on reading a beacon state from the DB (e.g., decoding)", - ) -}); -pub static BEACON_STATE_READ_COUNT: LazyLock> = LazyLock::new(|| { - try_create_int_counter( - "store_beacon_state_read_total", - "Total number of beacon state reads from the DB", - ) -}); -pub static BEACON_STATE_READ_BYTES: LazyLock> = LazyLock::new(|| { - try_create_int_counter( - "store_beacon_state_read_bytes_total", - "Total number of beacon state bytes read from the DB", - ) -}); -pub static BEACON_STATE_WRITE_OVERHEAD_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "store_beacon_state_write_overhead_seconds", - "Overhead on writing a beacon state to the DB (e.g., encoding)", - ) -}); -pub static BEACON_STATE_WRITE_COUNT: LazyLock> = LazyLock::new(|| { - try_create_int_counter( - "store_beacon_state_write_total", - "Total number of beacon state writes the DB", - ) -}); -pub static BEACON_STATE_WRITE_BYTES: LazyLock> = LazyLock::new(|| { - try_create_int_counter( - "store_beacon_state_write_bytes_total", - "Total number of beacon state bytes written to the DB", - ) -}); -pub static BEACON_HDIFF_READ_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( + +/* + * HDiffs + */ +pub static BEACON_HDIFF_READ_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "store_hdiff_read_seconds", - "Time required to read the hierarchical diff bytes 
from the database", + "Time taken to read hdiff bytes from disk", + &["db"], ) }); -pub static BEACON_HDIFF_DECODE_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( +pub static BEACON_HDIFF_DECODE_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "store_hdiff_decode_seconds", - "Time required to decode hierarchical diff bytes", + "Time taken to decode hdiff bytes", + &["db"], ) }); -pub static BEACON_HDIFF_BUFFER_CLONE_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( +pub static BEACON_HDIFF_APPLY_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( + "store_hdiff_apply_seconds", + "Time taken to apply an hdiff to a buffer", + &["db"], + ) +}); +pub static BEACON_HDIFF_COMPUTE_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( + "store_hdiff_compute_seconds", + "Time taken to compute an hdiff for a state", + &["db"], + ) +}); +pub static BEACON_HDIFF_BUFFER_LOAD_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( + "store_hdiff_buffer_load_seconds", + "Time taken to load an hdiff buffer for a state", + &["db"], + ) +}); +pub static BEACON_HDIFF_BUFFER_CLONE_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "store_hdiff_buffer_clone_seconds", - "Time required to clone hierarchical diff buffer bytes", + "Time taken to clone an hdiff buffer from a cache", + &["db"], ) }); +pub static BEACON_HDIFF_BUFFER_LOAD_BEFORE_STORE_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram_vec( + "store_hdiff_buffer_load_before_store_seconds", + "Time taken to load the hdiff buffer required for the storage of a new state", + &["db"], + ) + }); +// This metric is not split hot/cold because it is recorded in a place where that info is not known. 
pub static BEACON_HDIFF_BUFFER_APPLY_RESIZES: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( "store_hdiff_buffer_apply_resizes", @@ -209,6 +208,24 @@ pub static BEACON_HDIFF_BUFFER_APPLY_RESIZES: LazyLock> = Lazy Ok(vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) ) }); +// This metric is not split hot/cold because both databases use the same hierarchy config anyway +// and that's all that affects diff sizes. +pub static BEACON_HDIFF_SIZES: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec_with_buckets( + "store_hdiff_sizes", + "Size of hdiffs in bytes by layer (exponent)", + Ok(vec![ + 500_000.0, + 2_000_000.0, + 5_000_000.0, + 10_000_000.0, + 15_000_000.0, + 20_000_000.0, + 50_000_000.0, + ]), + &["exponent"], + ) +}); /* * Beacon Block */ @@ -259,17 +276,20 @@ pub static STORE_BEACON_HISTORIC_STATE_CACHE_SIZE: LazyLock> = "Current count of states in the historic state cache", ) }); -pub static STORE_BEACON_HDIFF_BUFFER_CACHE_SIZE: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "store_beacon_hdiff_buffer_cache_size", - "Current count of hdiff buffers in the historic state cache", - ) -}); -pub static STORE_BEACON_HDIFF_BUFFER_CACHE_BYTE_SIZE: LazyLock> = +pub static STORE_BEACON_HDIFF_BUFFER_CACHE_SIZE: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( + try_create_int_gauge_vec( + "store_beacon_hdiff_buffer_cache_size", + "Current count of hdiff buffers cached in memory", + &["db"], + ) + }); +pub static STORE_BEACON_HDIFF_BUFFER_CACHE_BYTE_SIZE: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge_vec( "store_beacon_hdiff_buffer_cache_byte_size", - "Memory consumed by hdiff buffers in the historic state cache", + "Memory consumed by hdiff buffers cached in memory", + &["db"], ) }); pub static STORE_BEACON_STATE_FREEZER_COMPRESS_TIME: LazyLock> = @@ -286,33 +306,6 @@ pub static STORE_BEACON_STATE_FREEZER_DECOMPRESS_TIME: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - "store_beacon_hdiff_buffer_apply_seconds", - "Time 
taken to apply hdiff buffer to a state buffer", - ) - }); -pub static STORE_BEACON_HDIFF_BUFFER_COMPUTE_TIME: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - "store_beacon_hdiff_buffer_compute_seconds", - "Time taken to compute hdiff buffer to a state buffer", - ) - }); -pub static STORE_BEACON_HDIFF_BUFFER_LOAD_TIME: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "store_beacon_hdiff_buffer_load_seconds", - "Time taken to load an hdiff buffer", - ) -}); -pub static STORE_BEACON_HDIFF_BUFFER_LOAD_FOR_STORE_TIME: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - "store_beacon_hdiff_buffer_load_for_store_seconds", - "Time taken to load an hdiff buffer to store another hdiff", - ) - }); pub static STORE_BEACON_HISTORIC_STATE_CACHE_HIT: LazyLock> = LazyLock::new(|| { try_create_int_counter( @@ -327,18 +320,20 @@ pub static STORE_BEACON_HISTORIC_STATE_CACHE_MISS: LazyLock> "Total count of historic state cache misses for full states", ) }); -pub static STORE_BEACON_HDIFF_BUFFER_CACHE_HIT: LazyLock> = +pub static STORE_BEACON_HDIFF_BUFFER_CACHE_HIT: LazyLock> = LazyLock::new(|| { - try_create_int_counter( + try_create_int_counter_vec( "store_beacon_hdiff_buffer_cache_hit_total", "Total count of hdiff buffer cache hits", + &["db"], ) }); -pub static STORE_BEACON_HDIFF_BUFFER_CACHE_MISS: LazyLock> = +pub static STORE_BEACON_HDIFF_BUFFER_CACHE_MISS: LazyLock> = LazyLock::new(|| { - try_create_int_counter( + try_create_int_counter_vec( "store_beacon_hdiff_buffer_cache_miss_total", "Total count of hdiff buffer cache miss", + &["db"], ) }); pub static STORE_BEACON_HDIFF_BUFFER_INTO_STATE_TIME: LazyLock> = diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index d209512159..fdd1880f55 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -147,6 +147,8 @@ where List, #[superstruct(only(Electra, Fulu))] pub pending_consolidations: List, + 
#[superstruct(only(Fulu))] + pub proposer_lookahead: Vector, } impl PartialBeaconState { @@ -444,7 +446,8 @@ impl TryInto> for PartialBeaconState { earliest_consolidation_epoch, pending_deposits, pending_partial_withdrawals, - pending_consolidations + pending_consolidations, + proposer_lookahead ], [historical_summaries] ), diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 30df552b7b..ade111983b 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -1,6 +1,5 @@ //! Implementation of historic state reconstruction (given complete block history). use crate::hot_cold_store::{HotColdDB, HotColdDBError}; -use crate::metadata::ANCHOR_FOR_ARCHIVE_NODE; use crate::metrics; use crate::{Error, ItemStore}; use itertools::{process_results, Itertools}; @@ -145,10 +144,8 @@ where }); } - self.compare_and_set_anchor_info_with_write( - old_anchor, - ANCHOR_FOR_ARCHIVE_NODE, - )?; + let new_anchor = old_anchor.as_archive_anchor(); + self.compare_and_set_anchor_info_with_write(old_anchor, new_anchor)?; return Ok(()); } else { diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 281ecab152..b6aacbb77a 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -1,4 +1,8 @@ -use crate::Error; +use crate::hdiff::HDiffBuffer; +use crate::{ + metrics::{self, HOT_METRIC}, + Error, +}; use lru::LruCache; use std::collections::{BTreeMap, HashMap, HashSet}; use std::num::NonZeroUsize; @@ -37,26 +41,53 @@ pub struct StateCache { // the state_root states: LruCache)>, block_map: BlockMap, + hdiff_buffers: HotHDiffBufferCache, max_epoch: Epoch, head_block_root: Hash256, headroom: NonZeroUsize, } +/// Cache of hdiff buffers for hot states. +/// +/// This cache only keeps buffers prior to the finalized state, which are required by the +/// hierarchical state diff scheme to construct newer unfinalized states. 
+/// +/// The cache always retains the hdiff buffer for the most recent snapshot so that even if the +/// cache capacity is 1, this snapshot never needs to be loaded from disk. +#[derive(Debug)] +pub struct HotHDiffBufferCache { + /// Cache of HDiffBuffers for states *prior* to the `finalized_state`. + /// + /// Maps state_root -> (slot, buffer). + hdiff_buffers: LruCache, +} + #[derive(Debug)] pub enum PutStateOutcome { + /// State is prior to the cache's finalized state (lower slot) and was cached as an HDiffBuffer. + PreFinalizedHDiffBuffer, + /// State is equal to the cache's finalized state and was not inserted. Finalized, + /// State was already present in the cache. Duplicate, - /// Includes deleted states as a result of this insertion + /// State is new to the cache and was inserted. + /// + /// Includes deleted states as a result of this insertion. New(Vec), } #[allow(clippy::len_without_is_empty)] impl StateCache { - pub fn new(capacity: NonZeroUsize, headroom: NonZeroUsize) -> Self { + pub fn new( + state_capacity: NonZeroUsize, + headroom: NonZeroUsize, + hdiff_capacity: NonZeroUsize, + ) -> Self { StateCache { finalized_state: None, - states: LruCache::new(capacity), + states: LruCache::new(state_capacity), block_map: BlockMap::default(), + hdiff_buffers: HotHDiffBufferCache::new(hdiff_capacity), max_epoch: Epoch::new(0), head_block_root: Hash256::ZERO, headroom, @@ -71,11 +102,20 @@ impl StateCache { self.states.cap().get() } + pub fn num_hdiff_buffers(&self) -> usize { + self.hdiff_buffers.len() + } + + pub fn hdiff_buffer_mem_usage(&self) -> usize { + self.hdiff_buffers.mem_usage() + } + pub fn update_finalized_state( &mut self, state_root: Hash256, block_root: Hash256, state: BeaconState, + pre_finalized_slots_to_retain: &[Slot], ) -> Result<(), Error> { if state.slot() % E::slots_per_epoch() != 0 { return Err(Error::FinalizedStateUnaligned); @@ -95,9 +135,31 @@ impl StateCache { // Prune block map. 
let state_roots_to_prune = self.block_map.prune(state.slot()); + // Prune HDiffBuffers that are no longer required by the hdiff grid of the finalized state. + // We need to do this prior to copying in any new hdiff buffers, because the cache + // preferences older slots. + // NOTE: This isn't perfect as it prunes by slot: there could be multiple buffers + // at some slots in the case of long forks without finality. + let new_hdiff_cache = HotHDiffBufferCache::new(self.hdiff_buffers.cap()); + let old_hdiff_cache = std::mem::replace(&mut self.hdiff_buffers, new_hdiff_cache); + for (state_root, (slot, buffer)) in old_hdiff_cache.hdiff_buffers { + if pre_finalized_slots_to_retain.contains(&slot) { + self.hdiff_buffers.put(state_root, slot, buffer); + } + } + // Delete states. for state_root in state_roots_to_prune { - self.states.pop(&state_root); + if let Some((_, state)) = self.states.pop(&state_root) { + // Add the hdiff buffer for this state to the hdiff cache if it is now part of + // the pre-finalized grid. The `put` method will take care of keeping the most + // useful buffers. + let slot = state.slot(); + if pre_finalized_slots_to_retain.contains(&slot) { + let hdiff_buffer = HDiffBuffer::from_state(state); + self.hdiff_buffers.put(state_root, slot, hdiff_buffer); + } + } } // Update finalized state. @@ -136,12 +198,19 @@ impl StateCache { block_root: Hash256, state: &BeaconState, ) -> Result { - if self - .finalized_state - .as_ref() - .is_some_and(|finalized_state| finalized_state.state_root == state_root) - { - return Ok(PutStateOutcome::Finalized); + if let Some(ref finalized_state) = self.finalized_state { + if finalized_state.state_root == state_root { + return Ok(PutStateOutcome::Finalized); + } else if state.slot() <= finalized_state.state.slot() { + // We assume any state being inserted into the cache is grid-aligned (it is the + // caller's responsibility to not feed us garbage) as we don't want to thread the + // hierarchy config through here. 
So any state received is converted to an + // HDiffBuffer and saved. + let hdiff_buffer = HDiffBuffer::from_state(state.clone()); + self.hdiff_buffers + .put(state_root, state.slot(), hdiff_buffer); + return Ok(PutStateOutcome::PreFinalizedHDiffBuffer); + } } if self.states.peek(&state_root).is_some() { @@ -192,6 +261,37 @@ impl StateCache { self.states.get(&state_root).map(|(_, state)| state.clone()) } + pub fn put_hdiff_buffer(&mut self, state_root: Hash256, slot: Slot, buffer: &HDiffBuffer) { + // Only accept HDiffBuffers prior to finalization. Later states should be stored as proper + // states, not HDiffBuffers. + if let Some(finalized_state) = &self.finalized_state { + if slot >= finalized_state.state.slot() { + return; + } + } + self.hdiff_buffers.put(state_root, slot, buffer.clone()); + } + + pub fn get_hdiff_buffer_by_state_root(&mut self, state_root: Hash256) -> Option { + if let Some(buffer) = self.hdiff_buffers.get(&state_root) { + metrics::inc_counter_vec(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_HIT, HOT_METRIC); + let timer = + metrics::start_timer_vec(&metrics::BEACON_HDIFF_BUFFER_CLONE_TIME, HOT_METRIC); + let result = Some(buffer.clone()); + drop(timer); + return result; + } + if let Some(buffer) = self + .get_by_state_root(state_root) + .map(HDiffBuffer::from_state) + { + metrics::inc_counter_vec(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_HIT, HOT_METRIC); + return Some(buffer); + } + metrics::inc_counter_vec(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_MISS, HOT_METRIC); + None + } + pub fn get_by_block_root( &mut self, block_root: Hash256, @@ -325,3 +425,80 @@ impl BlockMap { self.blocks.remove(block_root) } } + +impl HotHDiffBufferCache { + pub fn new(capacity: NonZeroUsize) -> Self { + Self { + hdiff_buffers: LruCache::new(capacity), + } + } + + pub fn get(&mut self, state_root: &Hash256) -> Option { + self.hdiff_buffers + .get(state_root) + .map(|(_, buffer)| buffer.clone()) + } + + /// Put a value in the cache, making room for it if necessary. 
+ /// + /// If the value was inserted then `true` is returned. + pub fn put(&mut self, state_root: Hash256, slot: Slot, buffer: HDiffBuffer) -> bool { + // If the cache is not full, simply insert the value. + if self.hdiff_buffers.len() != self.hdiff_buffers.cap().get() { + self.hdiff_buffers.put(state_root, (slot, buffer)); + return true; + } + + // If the cache is full, it has room for this new entry if: + // + // - The capacity is greater than 1: we can retain the snapshot and the new entry, or + // - The capacity is 1 and the slot of the new entry is older than the min_slot in the + // cache. This is a simplified way of retaining the snapshot in the cache. We don't need + // to worry about inserting/retaining states older than the snapshot because these are + // pruned on finalization and never reinserted. + let Some(min_slot) = self.hdiff_buffers.iter().map(|(_, (slot, _))| *slot).min() else { + // Unreachable: cache is full so should have >0 entries. + return false; + }; + + if self.hdiff_buffers.cap().get() > 1 || slot < min_slot { + // Remove LRU value. Cache is now at size `cap - 1`. + let Some((removed_state_root, (removed_slot, removed_buffer))) = + self.hdiff_buffers.pop_lru() + else { + // Unreachable: cache is full so should have at least one entry to pop. + return false; + }; + + // Insert new value. Cache size is now at size `cap`. + self.hdiff_buffers.put(state_root, (slot, buffer)); + + // If the removed value had the min slot and we didn't intend to replace it (cap=1) + // then we reinsert it. + if removed_slot == min_slot && slot >= min_slot { + self.hdiff_buffers + .put(removed_state_root, (removed_slot, removed_buffer)); + } + true + } else { + // No room. 
+ false + } + } + + pub fn cap(&self) -> NonZeroUsize { + self.hdiff_buffers.cap() + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.hdiff_buffers.len() + } + + pub fn mem_usage(&self) -> usize { + self.hdiff_buffers + .iter() + .map(|(_, (_, buffer))| buffer.size()) + .sum() + } +} diff --git a/book/src/advanced_database_migrations.md b/book/src/advanced_database_migrations.md index e9954e2ad9..f92ae7846b 100644 --- a/book/src/advanced_database_migrations.md +++ b/book/src/advanced_database_migrations.md @@ -17,6 +17,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| +| v7.1.0 | TBD 2025 | v23 | yes | | v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | @@ -206,6 +207,7 @@ Here are the steps to prune historic states: | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|-------------------------------------| +| v7.1.0 | TBD 2025 | v23 | yes | | v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | | v5.3.0 | Aug 2024 | v21 | yes before Electra using <= v7.0.0 | diff --git a/book/src/advanced_re-orgs.md b/book/src/advanced_re-orgs.md index fca156bda3..3a31778786 100644 --- a/book/src/advanced_re-orgs.md +++ b/book/src/advanced_re-orgs.md @@ -2,6 +2,9 @@ Since v3.4.0 Lighthouse will opportunistically re-org late blocks when proposing. +When Lighthouse is about to propose a new block, it quickly checks whether the block from the previous slot landed so late that hardly anyone attested to it. +If that late block looks weak enough, Lighthouse may decide to “re-org” it away: instead of building on it, Lighthouse builds its new block on the grand-parent block, turning the late block into an orphan. + This feature is intended to disincentivise late blocks and improve network health. 
Proposing a re-orging block is also more profitable for the proposer because it increases the number of attestations and transactions that can be included. diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index b65bef4762..2eee8356b1 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -353,126 +353,6 @@ See [Validator Inclusion APIs](./api_validator_inclusion.md). See [Validator Inclusion APIs](./api_validator_inclusion.md). -## `/lighthouse/eth1/syncing` - -Returns information regarding execution layer, as it is required for use in -consensus layer - -### Fields - -- `head_block_number`, `head_block_timestamp`: the block number and timestamp -from the very head of the execution chain. Useful for understanding the immediate -health of the execution node that the beacon node is connected to. -- `latest_cached_block_number` & `latest_cached_block_timestamp`: the block -number and timestamp of the latest block we have in our block cache. - - For correct execution client voting this timestamp should be later than the -`voting_target_timestamp`. - -- `voting_target_timestamp`: The latest timestamp allowed for an execution layer block in this voting period. -- `eth1_node_sync_status_percentage` (float): An estimate of how far the head of the - execution node is from the head of the execution chain. - - `100.0` indicates a fully synced execution node. - - `0.0` indicates an execution node that has not verified any blocks past the - genesis block. -- `lighthouse_is_cached_and_ready`: Is set to `true` if the caches in the - beacon node are ready for block production. - - This value might be set to - `false` whilst `eth1_node_sync_status_percentage == 100.0` if the beacon - node is still building its internal cache. - - This value might be set to `true` whilst - `eth1_node_sync_status_percentage < 100.0` since the cache only cares - about blocks a certain distance behind the head. 
- -### Example - -```bash -curl -X GET "http://localhost:5052/lighthouse/eth1/syncing" -H "accept: application/json" | jq -``` - -```json -{ - "data": { - "head_block_number": 3611806, - "head_block_timestamp": 1603249317, - "latest_cached_block_number": 3610758, - "latest_cached_block_timestamp": 1603233597, - "voting_target_timestamp": 1603228632, - "eth1_node_sync_status_percentage": 100, - "lighthouse_is_cached_and_ready": true - } -} -``` - -## `/lighthouse/eth1/block_cache` - -Returns a list of all the execution layer blocks in the execution client voting cache. - -### Example - -```bash -curl -X GET "http://localhost:5052/lighthouse/eth1/block_cache" -H "accept: application/json" | jq -``` - -```json -{ - "data": [ - { - "hash": "0x3a17f4b7ae4ee57ef793c49ebc9c06ff85207a5e15a1d0bd37b68c5ef5710d7f", - "timestamp": 1603173338, - "number": 3606741, - "deposit_root": "0xd24920d936e8fb9b67e93fd126ce1d9e14058b6d82dcf7d35aea46879fae6dee", - "deposit_count": 88911 - }, - { - "hash": "0x78852954ea4904e5f81038f175b2adefbede74fbb2338212964405443431c1e7", - "timestamp": 1603173353, - "number": 3606742, - "deposit_root": "0xd24920d936e8fb9b67e93fd126ce1d9e14058b6d82dcf7d35aea46879fae6dee", - "deposit_count": 88911 - } - ] -} -``` - -## `/lighthouse/eth1/deposit_cache` - -Returns a list of all cached logs from the deposit contract. 
- -### Example - -```bash -curl -X GET "http://localhost:5052/lighthouse/eth1/deposit_cache" -H "accept: application/json" | jq -``` - -```json -{ - "data": [ - { - "deposit_data": { - "pubkey": "0xae9e6a550ac71490cdf134533b1688fcbdb16f113d7190eacf4f2e9ca6e013d5bd08c37cb2bde9bbdec8ffb8edbd495b", - "withdrawal_credentials": "0x0062a90ebe71c4c01c4e057d7d13b944d9705f524ebfa24290c22477ab0517e4", - "amount": "32000000000", - "signature": "0xa87a4874d276982c471e981a113f8af74a31ffa7d18898a02df2419de2a7f02084065784aa2f743d9ddf80952986ea0b012190cd866f1f2d9c633a7a33c2725d0b181906d413c82e2c18323154a2f7c7ae6f72686782ed9e423070daa00db05b" - }, - "block_number": 3086571, - "index": 0, - "signature_is_valid": false - }, - { - "deposit_data": { - "pubkey": "0xb1d0ec8f907e023ea7b8cb1236be8a74d02ba3f13aba162da4a68e9ffa2e395134658d150ef884bcfaeecdf35c286496", - "withdrawal_credentials": "0x00a6aa2a632a6c4847cf87ef96d789058eb65bfaa4cc4e0ebc39237421c22e54", - "amount": "32000000000", - "signature": "0x8d0f8ec11935010202d6dde9ab437f8d835b9cfd5052c001be5af9304f650ada90c5363022e1f9ef2392dd222cfe55b40dfd52578468d2b2092588d4ad3745775ea4d8199216f3f90e57c9435c501946c030f7bfc8dbd715a55effa6674fd5a4" - }, - "block_number": 3086579, - "index": 1, - "signature_is_valid": false - } - ] -} -``` - ## `/lighthouse/liveness` POST request that checks if any of the given validators have attested in the given epoch. Returns a list diff --git a/book/src/api_vc_endpoints.md b/book/src/api_vc_endpoints.md index 87c9a517a5..14f4933e17 100644 --- a/book/src/api_vc_endpoints.md +++ b/book/src/api_vc_endpoints.md @@ -19,6 +19,7 @@ | [`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators. | | [`GET /lighthouse/logs`](#get-lighthouselogs) | Get logs | | [`GET /lighthouse/beacon/health`](#get-lighthousebeaconhealth) | Get health information for each connected beacon node. 
| +| [`POST /lighthouse/beacon/update`](#post-lighthousebeaconupdate) | Update the `--beacon-nodes` list. | The query to Lighthouse API endpoints requires authorization, see [Authorization Header](./api_vc_auth_header.md). @@ -926,3 +927,57 @@ curl -X GET http://localhost:5062/lighthouse/beacon/health \ } } ``` + +## `POST /lighthouse/beacon/update` + +Updates the list of beacon nodes originally specified by the `--beacon-nodes` CLI flag. +Use this endpoint when you don't want to restart the VC to add, remove or reorder beacon nodes. + +### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/beacon/update` | +| Method | POST | +| Required Headers | [`Authorization`](./api_vc_auth_header.md) | +| Typical Responses | 200, 400 | + +### Example Request Body + +```json +{ + "beacon_nodes": [ + "http://beacon-node1:5052", + "http://beacon-node2:5052", + "http://beacon-node3:5052" + ] +} +``` + +Command: + +```bash +DATADIR=/var/lib/lighthouse +curl -X POST http://localhost:5062/lighthouse/beacon/update \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + -d "{\"beacon_nodes\":[\"http://beacon-node1:5052\",\"http://beacon-node2:5052\",\"http://beacon-node3:5052\"]}" +``` + +### Example Response Body + +```json +{ + "data": { + "new_beacon_nodes_list": [ + "http://beacon-node1:5052", + "http://beacon-node2:5052", + "http://beacon-node3:5052" + ] + } +} +``` + +If successful, the response will be a copy of the new list included in the request. +If unsuccessful, an error will be shown and the beacon nodes list will not be updated. +You can verify the results of the endpoint by using the `/lighthouse/beacon/health` endpoint. 
diff --git a/book/src/archived_key_management.md b/book/src/archived_key_management.md index d8b00e8352..ad285ac4ec 100644 --- a/book/src/archived_key_management.md +++ b/book/src/archived_key_management.md @@ -21,7 +21,7 @@ using Lighthouse. Rather than continuing to read this page, we recommend users visit either: - The [Staking Launchpad][launchpad] for detailed, beginner-friendly instructions. -- The [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) for a CLI tool used by the [Staking Launchpad][launchpad]. +- The [ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli/releases) for a CLI tool used by the [Staking Launchpad][launchpad]. - The [validator-manager documentation](./validator_manager.md) for a Lighthouse-specific tool for streamlined validator management tools. ## The `lighthouse account-manager` diff --git a/book/src/faq.md b/book/src/faq.md index b97a82fcca..27726e59a5 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -209,7 +209,7 @@ The first thing is to ensure both consensus and execution clients are synced wit - the internet is working well - you have sufficient peers -You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). +You can see more information on the [EthStaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Another cause for missing attestations is the block arriving late, or there are delays during block processing. diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 35ad020b74..b2d2af6cec 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -122,15 +122,6 @@ Options: The number of epochs to wait between running the migration of data from the hot DB to the cold DB. Less frequent runs can be useful for minimizing disk writes [default: 1] - --eth1-blocks-per-log-query - Specifies the number of blocks that a deposit log query should span. 
- This will reduce the size of responses from the Eth1 endpoint. - [default: 1000] - --eth1-cache-follow-distance - Specifies the distance between the Eth1 chain head and the last block - which should be imported into the cache. Setting this value lower can - help compensate for irregular Proof-of-Work block times, but setting - it too low can make the node vulnerable to re-orgs. --execution-endpoint Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC connection. Uses the same endpoint to populate the deposit cache. @@ -171,10 +162,10 @@ Options: Specify your custom graffiti to be included in blocks. Defaults to the current version and commit, truncated to fit in 32 bytes. --hdiff-buffer-cache-size - Number of hierarchical diff (hdiff) buffers to cache in memory. Each - buffer is around the size of a BeaconState so you should be cautious - about setting this value too high. This flag is irrelevant for most - nodes, which run with state pruning enabled. [default: 16] + Number of cold hierarchical diff (hdiff) buffers to cache in memory. + Each buffer is around the size of a BeaconState so you should be + cautious about setting this value too high. This flag is irrelevant + for most nodes, which run with state pruning enabled. [default: 16] --hierarchy-exponents Specifies the frequency for storing full state snapshots and hierarchical diffs in the freezer DB. Accepts a comma-separated list @@ -187,6 +178,12 @@ Options: --historic-state-cache-size Specifies how many states from the freezer database should be cached in memory [default: 1] + --hot-hdiff-buffer-cache-size + Number of hot hierarchical diff (hdiff) buffers to cache in memory. + Each buffer is around the size of a BeaconState so you should be + cautious about setting this value too high. Setting this value higher + can reduce the time taken to store new states on disk at the cost of + higher memory usage. [default: 1] --http-address
Set the listen address for the RESTful HTTP API server. --http-allow-origin @@ -448,10 +445,6 @@ Flags: resource contention which degrades staking performance. Stakers should generally choose to avoid this flag since backfill sync is not required for staking. - --disable-deposit-contract-sync - Explicitly disables syncing of deposit logs from the execution node. - This overrides any previous option that depends on it. Useful if you - intend to run a non-validating beacon node. --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. This @@ -493,8 +486,6 @@ Flags: --enable-private-discovery Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses. - --eth1-purge-cache - Purges the eth1 block and deposit caches --genesis-backfill Attempts to download blocks all the way back to genesis when checkpoint syncing. diff --git a/book/src/help_vm.md b/book/src/help_vm.md index 85e1a1168f..8ff54122ef 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -12,7 +12,7 @@ Commands: data. This file can then be imported to a validator client using the "import-validators" command. Another, optional JSON file is created which contains a list of validator deposits in the same format as the - "ethereum/staking-deposit-cli" tool. + "ethstaker-deposit-cli" tool. import Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be generated using the diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 3b88206397..96ae261252 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -5,7 +5,7 @@ Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the validator keystores and other validator data. This file can then be imported to a validator client using the "import-validators" command. 
Another, optional JSON file is created which contains a list of validator -deposits in the same format as the "ethereum/staking-deposit-cli" tool. +deposits in the same format as the "ethstaker-deposit-cli" tool. Usage: lighthouse validator_manager create [OPTIONS] --output-path diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 63cca91ee5..ca635be5f1 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -39,8 +39,7 @@ Options: [default: 300] --keystore-file The path to a keystore JSON file to be imported to the validator - client. This file is usually created using staking-deposit-cli or - ethstaker-deposit-cli + client. This file is usually created using ethstaker-deposit-cli --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] diff --git a/book/src/installation_homebrew.md b/book/src/installation_homebrew.md index f94764889e..9d33bfb3eb 100644 --- a/book/src/installation_homebrew.md +++ b/book/src/installation_homebrew.md @@ -5,6 +5,9 @@ Lighthouse is available on Linux and macOS via the [Homebrew package manager](ht Please note that this installation method is maintained by the Homebrew community. It is not officially supported by the Lighthouse team. +> Note: There is a [compilation error](https://github.com/Homebrew/homebrew-core/pull/220922) for Lighthouse v7.0.0 and above that remains unresolved. Users are recommended to download the binary from [the release +page](https://github.com/sigp/lighthouse/releases) or build from source. + ## Installation Install the latest version of the [`lighthouse`][formula] formula with: diff --git a/book/src/mainnet_validator.md b/book/src/mainnet_validator.md index 8da8b98f89..106461aa9b 100644 --- a/book/src/mainnet_validator.md +++ b/book/src/mainnet_validator.md @@ -42,7 +42,7 @@ hardware. 32 ETH is a significant outlay and joining a testnet is a great way to ### Step 1. 
Create validator keys -The Ethereum Foundation provides the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases) for creating validator keys. Download and run the `staking-deposit-cli` with the command: +EthStaker provides the [ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli/releases) for creating validator keys. Download and run the `ethstaker-deposit-cli` with the command: ```bash ./deposit new-mnemonic @@ -52,7 +52,7 @@ and follow the instructions to generate the keys. When prompted for a network, s > **Important note:** A mnemonic (or seed phrase) is a 24-word string randomly generated in the process. It is highly recommended to write down the mnemonic and keep it safe offline. It is important to ensure that the mnemonic is never stored in any digital form (computers, mobile phones, etc) connected to the internet. Please also make one or more backups of the mnemonic to ensure your ETH is not lost in the case of data loss. It is very important to keep your mnemonic private as it represents the ultimate control of your ETH. -Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. The keys that are generated from staking-deposit-cli can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other. +Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. The keys that are generated from `ethstaker-deposit-cli` can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other. > Lighthouse also supports creating validator keys, see [Validator Manager Create](./validator_manager_create.md) for more info. 
@@ -62,19 +62,19 @@ Start an execution client and Lighthouse beacon node according to the [Run a Nod ### Step 3. Import validator keys to Lighthouse -In [Step 1](#step-1-create-validator-keys), the staking-deposit-cli will generate the validator keys into a `validator_keys` directory. Let's assume that -this directory is `$HOME/staking-deposit-cli/validator_keys`. Using the default `validators` directory in Lighthouse (`~/.lighthouse/mainnet/validators`), run the following command to import validator keys: +In [Step 1](#step-1-create-validator-keys), the `ethstaker-deposit-cli` will generate the validator keys into a `validator_keys` directory. Let's assume that +this directory is `$HOME/ethstaker-deposit-cli/validator_keys`. Using the default `validators` directory in Lighthouse (`~/.lighthouse/mainnet/validators`), run the following command to import validator keys: Mainnet: ```bash -lighthouse --network mainnet account validator import --directory $HOME/staking-deposit-cli/validator_keys +lighthouse --network mainnet account validator import --directory $HOME/ethstaker-deposit-cli/validator_keys ``` Hoodi testnet: ```bash -lighthouse --network hoodi account validator import --directory $HOME/staking-deposit-cli/validator_keys +lighthouse --network hoodi account validator import --directory $HOME/ethstaker-deposit-cli/validator_keys ``` > Note: The user must specify the consensus client network that they are importing the keys by using the `--network` flag. 
@@ -88,7 +88,7 @@ lighthouse --network hoodi account validator import --directory $HOME/staking-de The user will be prompted for a password for each keystore discovered: ``` -Keystore found at "/home/{username}/staking-deposit-cli/validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json": +Keystore found at "/home/{username}/ethstaker-deposit-cli/validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json": - Public key: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 - UUID: 8ea4cf99-8719-43c5-9eda-e97b8a4e074f diff --git a/book/src/validator_manager.md b/book/src/validator_manager.md index c610340b39..b0190c1812 100644 --- a/book/src/validator_manager.md +++ b/book/src/validator_manager.md @@ -15,7 +15,7 @@ except the latter creates files that will be read by the VC next time it starts whilst the former makes instant changes to a live VC. The `account-manager` is ideal for importing keys created with the -[staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli). On the +[ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli). On the other hand, the `validator-manager` is ideal for moving existing validators between two VCs or for advanced users to create validators at scale with less downtime. diff --git a/book/src/validator_manager_api.md b/book/src/validator_manager_api.md index a5fc69fd5a..7bc5be8557 100644 --- a/book/src/validator_manager_api.md +++ b/book/src/validator_manager_api.md @@ -1,6 +1,6 @@ # Managing Validators -The `lighthouse validator-manager` uses the [Keymanager API](https://ethereum.github.io/keymanager-APIs/#/) to list, import and delete keystores via the HTTP API. This requires the validator client running with the flag `--http`. +The `lighthouse validator-manager` uses the [Keymanager API](https://ethereum.github.io/keymanager-APIs/#/) to list, import and delete keystores via the HTTP API. 
This requires the validator client to be running with the flag `--http`. By default, the validator client HTTP address is `http://localhost:5062`. If a different IP address or port is used, add the flag `--vc-url http://IP:port_number` to the command below. ## Delete @@ -18,7 +18,7 @@ lighthouse vm delete --vc-token ~/.lighthouse/mainnet/validators/api-token.txt - ## Import -The `import` command imports validator keystores generated by the staking-deposit-cli/ethstaker-deposit-cli. To import a validator keystore: +The `import` command imports validator keystores generated by the `ethstaker-deposit-cli`. To import a validator keystore: ```bash lighthouse vm import --vc-token --keystore-file /path/to/json --password keystore_password diff --git a/book/src/validator_manager_create.md b/book/src/validator_manager_create.md index 458907bc65..ae40910d5c 100644 --- a/book/src/validator_manager_create.md +++ b/book/src/validator_manager_create.md @@ -8,7 +8,7 @@ mnemonic and produces two files: - `validators.json`: the keystores and passwords for the newly generated validators, in JSON format. - `deposits.json`: a JSON file of the same format as - [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) which can + [ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli) which can be used for deposit submission via the [Ethereum Staking Launchpad][]. @@ -69,7 +69,7 @@ lighthouse \ > Be sure to remove `./validators.json` after the import is successful since it > contains unencrypted validator keystores. -> Note: To import validators with validator-manager using keystore files created using the staking deposit CLI, refer to [Managing Validators](./validator_manager_api.md#import). +> Note: To import validators with validator-manager using keystore files created using the `ethstaker-deposit-cli`, refer to [Managing Validators](./validator_manager_api.md#import). 
## Detailed Guide diff --git a/book/src/validator_slashing_protection.md b/book/src/validator_slashing_protection.md index 3e0fe184e5..03e54e5827 100644 --- a/book/src/validator_slashing_protection.md +++ b/book/src/validator_slashing_protection.md @@ -21,7 +21,7 @@ and carefully to keep your validators safe. See the [Troubleshooting](#troublesh The database will be automatically created, and your validators registered with it when: -* Importing keys from another source (e.g. [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases), Lodestar, Nimbus, Prysm, Teku, [ethdo](https://github.com/wealdtech/ethdo)). +* Importing keys from another source (e.g. [ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli), Lodestar, Nimbus, Prysm, Teku, [ethdo](https://github.com/wealdtech/ethdo)). See [import validator keys](./mainnet_validator.md#step-3-import-validator-keys-to-lighthouse). * Creating keys using Lighthouse itself (`lighthouse account validator create`) * Creating keys via the [validator client API](./api_vc.md). diff --git a/book/src/validator_voluntary_exit.md b/book/src/validator_voluntary_exit.md index d5d1722d59..2a45852f32 100644 --- a/book/src/validator_voluntary_exit.md +++ b/book/src/validator_voluntary_exit.md @@ -94,7 +94,7 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12 There are two types of withdrawal credentials, `0x00` and `0x01`. To check which type your validator has, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: - `withdrawals enabled` means your validator is of type `0x01`, and you will automatically receive the full withdrawal to the withdrawal address that you set. 
-- `withdrawals not enabled` means your validator is of type `0x00`, and will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). +- `withdrawals not enabled` means your validator is of type `0x00`, and you will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is using `ethstaker-deposit-cli` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). ### 2. What if my validator is of type `0x00` and I do not update my withdrawal credentials after I initiated a voluntary exit? diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 4c253283fe..5f32645c92 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -450,11 +450,11 @@ pub fn is_voting_keystore(file_name: &str) -> bool { return true; } - // The format exported by the `eth2.0-deposit-cli` library. + // The format exported by the `ethstaker-deposit-cli` library. // // Reference to function that generates keystores: // - // https://github.com/ethereum/eth2.0-deposit-cli/blob/7cebff15eac299b3b1b090c896dd3410c8463450/eth2deposit/credentials.py#L58-L62 + // https://github.com/eth-educators/ethstaker-deposit-cli/blob/80d536374de838ccae142974ed0e747b46beb030/ethstaker_deposit/credentials.py#L186-L190 // // Since we include the key derivation path of `m/12381/3600/x/0/0` this should only ever match // with a voting keystore and never a withdrawal keystore. 
diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 2ea8929fd9..712759a8ee 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -18,7 +18,6 @@ use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; use ::types::beacon_response::ExecutionOptimisticFinalizedBeaconResponse; use derivative::Derivative; -use either::Either; use futures::Stream; use futures_util::StreamExt; use libp2p_identity::PeerId; @@ -51,6 +50,24 @@ pub const CONTENT_TYPE_HEADER: &str = "Content-Type"; pub const SSZ_CONTENT_TYPE_HEADER: &str = "application/octet-stream"; pub const JSON_CONTENT_TYPE_HEADER: &str = "application/json"; +/// Specific optimized timeout constants for HTTP requests involved in different validator duties. +/// This can help ensure that proper endpoint fallback occurs. +const HTTP_ATTESTATION_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT: u32 = 24; +const HTTP_ATTESTATION_AGGREGATOR_TIMEOUT_QUOTIENT: u32 = 24; // For DVT involving middleware only +const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; +const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_SYNC_AGGREGATOR_TIMEOUT_QUOTIENT: u32 = 24; // For DVT involving middleware only +const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; +const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; +const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_DEFAULT_TIMEOUT_QUOTIENT: u32 = 4; + #[derive(Debug)] pub enum Error { /// The `reqwest` client raised an error. 
@@ -169,6 +186,28 @@ impl Timeouts { default: timeout, } } + + pub fn use_optimized_timeouts(base_timeout: Duration) -> Self { + Timeouts { + attestation: base_timeout / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, + attester_duties: base_timeout / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, + attestation_subscriptions: base_timeout + / HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT, + attestation_aggregators: base_timeout / HTTP_ATTESTATION_AGGREGATOR_TIMEOUT_QUOTIENT, + liveness: base_timeout / HTTP_LIVENESS_TIMEOUT_QUOTIENT, + proposal: base_timeout / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, + proposer_duties: base_timeout / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, + sync_committee_contribution: base_timeout + / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, + sync_duties: base_timeout / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, + sync_aggregators: base_timeout / HTTP_SYNC_AGGREGATOR_TIMEOUT_QUOTIENT, + get_beacon_blocks_ssz: base_timeout / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, + get_debug_beacon_states: base_timeout / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, + get_deposit_snapshot: base_timeout / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, + get_validator_block: base_timeout / HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT, + default: base_timeout / HTTP_DEFAULT_TIMEOUT_QUOTIENT, + } + } } /// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a @@ -670,6 +709,29 @@ impl BeaconNodeHttpClient { self.post_with_opt_response(path, &request).await } + /// `POST beacon/states/{state_id}/validator_identities` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_states_validator_identities( + &self, + state_id: StateId, + ids: Vec, + ) -> Result>>, Error> + { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validator_identities"); + + let request = ValidatorIdentitiesRequestBody { ids }; + + self.post_with_opt_response(path, &request).await + } + /// `GET beacon/states/{state_id}/validators?id,status` /// /// Returns `Ok(None)` on a 404 error. @@ -1300,7 +1362,9 @@ impl BeaconNodeHttpClient { } self.get_fork_contextual(path, |fork| { - (fork, spec.max_blobs_per_block_by_fork(fork) as usize) + // TODO(EIP-7892): this will overestimate the max number of blobs + // It would be better if we could get an epoch passed into this function + (fork, spec.max_blobs_per_block_within_fork(fork) as usize) }) .await .map(|opt| opt.map(BeaconResponse::ForkVersioned)) @@ -1436,29 +1500,10 @@ impl BeaconNodeHttpClient { .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } - /// `POST v1/beacon/pool/attestations` - pub async fn post_beacon_pool_attestations_v1( - &self, - attestations: &[Attestation], - ) -> Result<(), Error> { - let mut path = self.eth_path(V1)?; - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("beacon") - .push("pool") - .push("attestations"); - - self.post_with_timeout(path, &attestations, self.timeouts.attestation) - .await?; - - Ok(()) - } - /// `POST v2/beacon/pool/attestations` pub async fn post_beacon_pool_attestations_v2( &self, - attestations: Either>, Vec>, + attestations: Vec, fork_name: ForkName, ) -> Result<(), Error> { let mut path = self.eth_path(V2)?; @@ -1469,26 +1514,13 @@ impl BeaconNodeHttpClient { .push("pool") .push("attestations"); - match attestations { - Either::Right(attestations) => { - self.post_with_timeout_and_consensus_header( - path, - &attestations, - self.timeouts.attestation, - fork_name, - ) - .await?; - } - Either::Left(attestations) => { - self.post_with_timeout_and_consensus_header( - path, - &attestations, - self.timeouts.attestation, - fork_name, - ) - .await?; - } - }; + self.post_with_timeout_and_consensus_header( + path, + &attestations, + self.timeouts.attestation, + fork_name, + ) + .await?; Ok(()) } @@ -1717,18 +1749,6 @@ impl BeaconNodeHttpClient { Ok(()) } - /// `GET beacon/deposit_snapshot` - pub async fn get_deposit_snapshot(&self) -> Result, Error> { - let mut path = self.eth_path(V1)?; - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("beacon") - .push("deposit_snapshot"); - self.get_opt_with_timeout::, _>(path, self.timeouts.get_deposit_snapshot) - .await - .map(|opt| opt.map(|r| r.data)) - } - /// `POST beacon/rewards/sync_committee` pub async fn post_beacon_rewards_sync_committee( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 9a5d9100cf..24fb110a35 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -7,11 +7,8 @@ pub mod sync_state; use crate::{ lighthouse::sync_state::SyncState, - types::{ - AdminPeer, DepositTreeSnapshot, Epoch, FinalizedExecutionBlock, GenericResponse, - ValidatorId, - }, - BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, + types::{AdminPeer, Epoch, GenericResponse, ValidatorId}, + BeaconNodeHttpClient, DepositData, Error, Hash256, Slot, }; use proto_array::core::ProtoArray; use serde::{Deserialize, Serialize}; @@ -159,18 +156,6 @@ pub struct ProcessHealth { pub pid_process_seconds_total: u64, } -/// Indicates how up-to-date the Eth1 caches are. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct Eth1SyncStatusData { - pub head_block_number: Option, - pub head_block_timestamp: Option, - pub latest_cached_block_number: Option, - pub latest_cached_block_timestamp: Option, - pub voting_target_timestamp: u64, - pub eth1_node_sync_status_percentage: f64, - pub lighthouse_is_cached_and_ready: bool, -} - /// A fully parsed eth1 deposit contract log. #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode)] pub struct DepositLog { @@ -183,41 +168,6 @@ pub struct DepositLog { pub signature_is_valid: bool, } -/// A block of the eth1 chain. 
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode)] -pub struct Eth1Block { - pub hash: Hash256, - pub timestamp: u64, - pub number: u64, - #[ssz(with = "four_byte_option_hash256")] - pub deposit_root: Option, - #[ssz(with = "four_byte_option_u64")] - pub deposit_count: Option, -} - -impl Eth1Block { - pub fn eth1_data(self) -> Option { - Some(Eth1Data { - deposit_root: self.deposit_root?, - deposit_count: self.deposit_count?, - block_hash: self.hash, - }) - } -} - -impl From for FinalizedExecutionBlock { - fn from(eth1_block: Eth1Block) -> Self { - Self { - deposit_count: eth1_block.deposit_count.unwrap_or(0), - deposit_root: eth1_block - .deposit_root - .unwrap_or_else(|| DepositTreeSnapshot::default().deposit_root), - block_hash: eth1_block.hash, - block_height: eth1_block.number, - } - } -} - impl BeaconNodeHttpClient { /// `GET lighthouse/health` pub async fn get_lighthouse_health(&self) -> Result, Error> { @@ -298,63 +248,6 @@ impl BeaconNodeHttpClient { self.get(path).await } - /// `GET lighthouse/eth1/syncing` - pub async fn get_lighthouse_eth1_syncing( - &self, - ) -> Result, Error> { - let mut path = self.server.full.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("eth1") - .push("syncing"); - - self.get(path).await - } - - /// `GET lighthouse/eth1/block_cache` - pub async fn get_lighthouse_eth1_block_cache( - &self, - ) -> Result>, Error> { - let mut path = self.server.full.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("eth1") - .push("block_cache"); - - self.get(path).await - } - - /// `GET lighthouse/eth1/deposit_cache` - pub async fn get_lighthouse_eth1_deposit_cache( - &self, - ) -> Result>, Error> { - let mut path = self.server.full.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("lighthouse") - .push("eth1") - .push("deposit_cache"); - - self.get(path).await - } - - /// `GET lighthouse/staking` - pub async fn get_lighthouse_staking(&self) -> Result { - let mut path = self.server.full.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("staking"); - - self.get_opt::<(), _>(path).await.map(|opt| opt.is_some()) - } - /// `POST lighthouse/database/reconstruct` pub async fn post_lighthouse_database_reconstruct(&self) -> Result { let mut path = self.server.full.clone(); diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index d7d5a00df5..4407e30e43 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -197,3 +197,13 @@ pub struct SingleExportKeystoresResponse { pub struct SetGraffitiRequest { pub graffiti: GraffitiString, } + +#[derive(Serialize, Deserialize, Debug)] +pub struct UpdateCandidatesRequest { + pub beacon_nodes: Vec, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct UpdateCandidatesResponse { + pub new_beacon_nodes_list: Vec, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 00c43e3dc3..6cdf67e6e5 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -349,6 +349,14 @@ pub struct ValidatorBalanceData { pub balance: u64, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValidatorIdentityData { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub pubkey: PublicKeyBytes, + pub activation_epoch: Epoch, +} + // Implemented according to what is described here: // // https://hackmd.io/ofFJ5gOmQpu1jjHilHbdQQ @@ -694,6 +702,12 @@ pub struct ValidatorBalancesRequestBody { pub ids: Vec, } +#[derive(Clone, Default, Serialize, Deserialize)] +#[serde(transparent)] +pub struct ValidatorIdentitiesRequestBody { + pub ids: Vec, +} + #[derive(Clone, Deserialize)] #[serde(deny_unknown_fields)] pub 
struct BlobIndicesQuery { @@ -701,6 +715,13 @@ pub struct BlobIndicesQuery { pub indices: Option>, } +#[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct DataColumnIndicesQuery { + #[serde(default, deserialize_with = "option_query_vec")] + pub indices: Option>, +} + #[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec); @@ -977,6 +998,35 @@ impl SseBlobSidecar { } } +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseDataColumnSidecar { + pub block_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub slot: Slot, + pub kzg_commitments: Vec, + pub versioned_hashes: Vec, +} + +impl SseDataColumnSidecar { + pub fn from_data_column_sidecar( + data_column_sidecar: &DataColumnSidecar, + ) -> SseDataColumnSidecar { + let kzg_commitments = data_column_sidecar.kzg_commitments.to_vec(); + let versioned_hashes = kzg_commitments + .iter() + .map(|c| c.calculate_versioned_hash()) + .collect(); + SseDataColumnSidecar { + block_root: data_column_sidecar.block_root(), + index: data_column_sidecar.index, + slot: data_column_sidecar.slot(), + kzg_commitments, + versioned_hashes, + } + } +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseFinalizedCheckpoint { pub block: Hash256, @@ -1127,6 +1177,7 @@ pub enum EventKind { SingleAttestation(Box), Block(SseBlock), BlobSidecar(SseBlobSidecar), + DataColumnSidecar(SseDataColumnSidecar), FinalizedCheckpoint(SseFinalizedCheckpoint), Head(SseHead), VoluntaryExit(SignedVoluntaryExit), @@ -1150,6 +1201,7 @@ impl EventKind { EventKind::Head(_) => "head", EventKind::Block(_) => "block", EventKind::BlobSidecar(_) => "blob_sidecar", + EventKind::DataColumnSidecar(_) => "data_column_sidecar", EventKind::Attestation(_) => "attestation", EventKind::SingleAttestation(_) => "single_attestation", EventKind::VoluntaryExit(_) => "voluntary_exit", @@ -1185,6 
+1237,11 @@ impl EventKind { "blob_sidecar" => Ok(EventKind::BlobSidecar(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Blob Sidecar: {:?}", e)), )?)), + "data_column_sidecar" => Ok(EventKind::DataColumnSidecar( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Data Column Sidecar: {:?}", e)) + })?, + )), "chain_reorg" => Ok(EventKind::ChainReorg(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Chain Reorg: {:?}", e)), )?)), @@ -1274,6 +1331,7 @@ pub enum EventTopic { Head, Block, BlobSidecar, + DataColumnSidecar, Attestation, SingleAttestation, VoluntaryExit, @@ -1300,6 +1358,7 @@ impl FromStr for EventTopic { "head" => Ok(EventTopic::Head), "block" => Ok(EventTopic::Block), "blob_sidecar" => Ok(EventTopic::BlobSidecar), + "data_column_sidecar" => Ok(EventTopic::DataColumnSidecar), "attestation" => Ok(EventTopic::Attestation), "single_attestation" => Ok(EventTopic::SingleAttestation), "voluntary_exit" => Ok(EventTopic::VoluntaryExit), @@ -1327,6 +1386,7 @@ impl fmt::Display for EventTopic { EventTopic::Head => write!(f, "head"), EventTopic::Block => write!(f, "block"), EventTopic::BlobSidecar => write!(f, "blob_sidecar"), + EventTopic::DataColumnSidecar => write!(f, "data_column_sidecar"), EventTopic::Attestation => write!(f, "attestation"), EventTopic::SingleAttestation => write!(f, "single_attestation"), EventTopic::VoluntaryExit => write!(f, "voluntary_exit"), diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 30313d0672..d50117c09e 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -33,7 +33,7 @@ const M_MMAP_THRESHOLD: c_int = -3; /// https://man7.org/linux/man-pages/man3/mallopt.3.html const ENV_VAR_MMAP_THRESHOLD: &str = "MALLOC_MMAP_THRESHOLD_"; -pub static GLOBAL_LOCK: LazyLock> = LazyLock::new(|| <_>::default()); +pub static GLOBAL_LOCK: LazyLock> = 
LazyLock::new(Default::default); // Metrics for the malloc. For more information, see: // diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 4224f00acc..d4faf1e4b8 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -10,3 +10,6 @@ futures = { workspace = true } metrics = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } tracing = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(tokio_unstable)"] } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index dbdac600f3..e75aead656 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -92,7 +92,7 @@ impl TaskExecutor { /// This function should only be used during testing. In production, prefer to obtain an /// instance of `Self` via a `environment::RuntimeContext` (see the `lighthouse/environment` /// crate). - #[instrument(parent = None,level = "info", fields(service = service_name), name = "task_executor", skip_all)] + #[instrument(parent = None,fields(service = service_name), name = "task_executor", skip_all)] pub fn new>( handle: T, exit: async_channel::Receiver<()>, @@ -108,7 +108,7 @@ impl TaskExecutor { } /// Clones the task executor adding a service name. - #[instrument(parent = None,level = "info", fields(service = service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = service_name), name = "task_executor", skip_all)] pub fn clone_with_name(&self, service_name: String) -> Self { TaskExecutor { handle_provider: self.handle_provider.clone(), @@ -124,7 +124,7 @@ impl TaskExecutor { /// The purpose of this function is to create a compile error if some function which previously /// returned `()` starts returning something else. Such a case may otherwise result in /// accidental error suppression. 
- #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn_ignoring_error( &self, task: impl Future> + Send + 'static, @@ -136,7 +136,7 @@ impl TaskExecutor { /// Spawn a task to monitor the completion of another task. /// /// If the other task exits by panicking, then the monitor task will shut down the executor. - #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = self.service_name), name = "task_executor", skip_all)] fn spawn_monitor( &self, task_handle: impl Future> + Send + 'static, @@ -144,7 +144,7 @@ impl TaskExecutor { ) { let mut shutdown_sender = self.shutdown_sender(); if let Some(handle) = self.handle() { - handle.spawn(async move { + let fut = async move { let timer = metrics::start_timer_vec(&metrics::TASKS_HISTOGRAM, &[name]); if let Err(join_error) = task_handle.await { if let Ok(_panic) = join_error.try_into_panic() { @@ -153,7 +153,14 @@ impl TaskExecutor { } } drop(timer); - }); + }; + #[cfg(tokio_unstable)] + tokio::task::Builder::new() + .name(&format!("{name}-monitor")) + .spawn_on(fut, &handle) + .expect("Failed to spawn monitor task"); + #[cfg(not(tokio_unstable))] + handle.spawn(fut); } else { debug!("Couldn't spawn monitor task. Runtime shutting down") } @@ -168,7 +175,7 @@ impl TaskExecutor { /// of a panic, the executor will be shut down via `self.signal_tx`. /// /// This function generates prometheus metrics on number of tasks and task duration. 
- #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn(&self, task: impl Future + Send + 'static, name: &'static str) { if let Some(task_handle) = self.spawn_handle(task, name) { self.spawn_monitor(task_handle, name) @@ -184,7 +191,7 @@ impl TaskExecutor { /// This is useful in cases where the future to be spawned needs to do additional cleanup work when /// the task is completed/canceled (e.g. writing local variables to disk) or the task is created from /// some framework which does its own cleanup (e.g. a hyper server). - #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn_without_exit( &self, task: impl Future + Send + 'static, @@ -199,6 +206,12 @@ impl TaskExecutor { int_gauge.inc(); if let Some(handle) = self.handle() { + #[cfg(tokio_unstable)] + tokio::task::Builder::new() + .name(name) + .spawn_on(future, &handle) + .expect("Failed to spawn task"); + #[cfg(not(tokio_unstable))] handle.spawn(future); } else { debug!("Couldn't spawn task. Runtime shutting down"); @@ -222,7 +235,7 @@ impl TaskExecutor { /// The task is cancelled when the corresponding async-channel is dropped. /// /// This function generates prometheus metrics on number of tasks and task duration. 
- #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn_handle( &self, task: impl Future + Send + 'static, @@ -234,7 +247,7 @@ impl TaskExecutor { let int_gauge_1 = int_gauge.clone(); int_gauge.inc(); if let Some(handle) = self.handle() { - Some(handle.spawn(async move { + let fut = async move { futures::pin_mut!(exit); let result = match future::select(Box::pin(task), exit).await { future::Either::Left((value, _)) => Some(value), @@ -245,7 +258,16 @@ impl TaskExecutor { }; int_gauge_1.dec(); result - })) + }; + #[cfg(tokio_unstable)] + return Some( + tokio::task::Builder::new() + .name(name) + .spawn_on(fut, &handle) + .expect("Failed to spawn task"), + ); + #[cfg(not(tokio_unstable))] + Some(handle.spawn(fut)) } else { debug!("Couldn't spawn task. Runtime shutting down"); None @@ -261,7 +283,7 @@ impl TaskExecutor { /// The Future returned behaves like the standard JoinHandle which can return an error if the /// task failed. /// This function generates prometheus metrics on number of tasks and task duration. - #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn_blocking_handle( &self, task: F, @@ -310,7 +332,7 @@ impl TaskExecutor { /// a `tokio` context present in the thread-local storage due to some `rayon` funkiness. Talk to /// @paulhauner if you plan to use this function in production. He has put metrics in here to /// track any use of it, so don't think you can pull a sneaky one on him. 
- #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = self.service_name), name = "task_executor", skip_all)] pub fn block_on_dangerous( &self, future: F, @@ -346,7 +368,7 @@ impl TaskExecutor { } /// Returns a `Handle` to the current runtime. - #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = self.service_name), name = "task_executor", skip_all)] pub fn handle(&self) -> Option { self.handle_provider.handle() } @@ -361,7 +383,7 @@ impl TaskExecutor { } /// Get a channel to request shutting down. - #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] + #[instrument(parent = None, fields(service = self.service_name), name = "task_executor", skip_all)] pub fn shutdown_sender(&self) -> Sender { self.signal_tx.clone() } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 95bdee574d..8d510d0e89 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -15,6 +15,7 @@ use std::fmt; use std::sync::Mutex; use std::time::Duration; use store::MemoryStore; +use types::SingleAttestation; use types::{ test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, @@ -463,10 +464,17 @@ impl ForkChoiceTest { ) .expect("should sign attestation"); + let single_attestation = SingleAttestation { + attester_index: validator_index as u64, + committee_index: validator_committee_index as u64, + data: attestation.data().clone(), + signature: attestation.signature().clone(), + }; + let mut verified_attestation = self .harness .chain - .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)) + 
.verify_unaggregated_attestation_for_gossip(&single_attestation, Some(subnet_id)) .expect("precondition: should gossip verify attestation"); if let MutationDelay::Blocks(slots) = delay { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index dde2411787..76a07ac6be 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -856,7 +856,7 @@ impl ProtoArrayForkChoice { } /// See `ProtoArray::iter_nodes` - pub fn iter_nodes(&self, block_root: &Hash256) -> Iter { + pub fn iter_nodes(&self, block_root: &Hash256) -> Iter<'_> { self.proto_array.iter_nodes(block_root) } diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index 0c176d4ab1..d0086c1041 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -148,12 +148,12 @@ impl ConsensusContext { } #[allow(unknown_lints)] - #[allow(elided_named_lifetimes)] + #[allow(mismatched_lifetime_syntaxes)] pub fn get_indexed_attestation<'a>( &'a mut self, state: &BeaconState, attestation: AttestationRef<'a, E>, - ) -> Result, BlockOperationError> { + ) -> Result, BlockOperationError> { let key = attestation.tree_hash_root(); match attestation { AttestationRef::Base(attn) => match self.indexed_attestations.entry(key) { diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 39f438f97f..dafd0d79ea 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -56,7 +56,7 @@ impl From for Error { pub fn get_pubkey_from_state( state: &BeaconState, validator_index: usize, -) -> Option> +) -> Option> where E: EthSpec, { diff --git 
a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index 7485e365ec..9db2ff3096 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -30,6 +30,7 @@ pub enum EpochProcessingError { MissingEarliestExitEpoch, MissingExitBalanceToConsume, PendingDepositsLogicError, + ProposerLookaheadOutOfBounds(usize), } impl From for EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index af6a0936e2..ae1e330043 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -19,7 +19,7 @@ use types::{ milhouse::Cow, ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, - ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, + ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, Vector, }; pub struct SinglePassConfig { @@ -30,6 +30,7 @@ pub struct SinglePassConfig { pub pending_deposits: bool, pub pending_consolidations: bool, pub effective_balance_updates: bool, + pub proposer_lookahead: bool, } impl Default for SinglePassConfig { @@ -48,6 +49,7 @@ impl SinglePassConfig { pending_deposits: true, pending_consolidations: true, effective_balance_updates: true, + proposer_lookahead: true, } } @@ -60,6 +62,7 @@ impl SinglePassConfig { pending_deposits: false, pending_consolidations: false, effective_balance_updates: false, + proposer_lookahead: false, } } } @@ -460,9 +463,43 @@ pub fn process_epoch_single_pass( next_epoch_cache.into_epoch_cache(next_epoch_activation_queue, spec)?; } + if conf.proposer_lookahead && fork_name.fulu_enabled() { + process_proposer_lookahead(state, spec)?; + } 
+ Ok(summary) } +// TOOO(EIP-7917): use balances cache +pub fn process_proposer_lookahead( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let mut lookahead = state.proposer_lookahead()?.clone().to_vec(); + + // Shift out proposers in the first epoch + lookahead.copy_within((E::slots_per_epoch() as usize).., 0); + + let next_epoch = state + .current_epoch() + .safe_add(spec.min_seed_lookahead.as_u64())? + .safe_add(1)?; + let last_epoch_proposers = state.get_beacon_proposer_indices(next_epoch, spec)?; + + // Fill in the last epoch with new proposer indices + let last_epoch_start = E::proposer_lookahead_slots().safe_sub(E::slots_per_epoch() as usize)?; + for (i, proposer) in last_epoch_proposers.into_iter().enumerate() { + let index = last_epoch_start.safe_add(i)?; + *lookahead + .get_mut(index) + .ok_or(Error::ProposerLookaheadOutOfBounds(index))? = proposer as u64; + } + + *state.proposer_lookahead_mut()? = Vector::new(lookahead)?; + + Ok(()) +} + fn process_single_inactivity_update( inactivity_score: &mut Cow, validator_info: &ValidatorInfo, diff --git a/consensus/state_processing/src/upgrade/fulu.rs b/consensus/state_processing/src/upgrade/fulu.rs index 6e0cd3fa9d..6b038ad73a 100644 --- a/consensus/state_processing/src/upgrade/fulu.rs +++ b/consensus/state_processing/src/upgrade/fulu.rs @@ -1,5 +1,8 @@ +use safe_arith::SafeArith; use std::mem; -use types::{BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork}; +use types::{ + BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork, Vector, +}; /// Transform a `Electra` state into an `Fulu` state. 
pub fn upgrade_to_fulu( @@ -15,11 +18,32 @@ pub fn upgrade_to_fulu( Ok(()) } +fn initialize_proposer_lookahead( + state: &BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + let current_epoch = state.current_epoch(); + let mut lookahead = Vec::with_capacity(E::proposer_lookahead_slots()); + for i in 0..(spec.min_seed_lookahead.safe_add(1)?.as_u64()) { + let target_epoch = current_epoch.safe_add(i)?; + lookahead.extend( + state + .get_beacon_proposer_indices(target_epoch, spec) + .map(|vec| vec.into_iter().map(|x| x as u64))?, + ); + } + + Vector::new(lookahead).map_err(|e| { + Error::PleaseNotifyTheDevs(format!("Failed to initialize proposer lookahead: {:?}", e)) + }) +} + pub fn upgrade_state_to_fulu( pre_state: &mut BeaconState, spec: &ChainSpec, ) -> Result, Error> { let epoch = pre_state.current_epoch(); + let proposer_lookahead = initialize_proposer_lookahead(pre_state, spec)?; let pre = pre_state.as_electra_mut()?; // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. 
@@ -89,6 +113,7 @@ pub fn upgrade_state_to_fulu( exit_cache: mem::take(&mut pre.exit_cache), slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: mem::take(&mut pre.epoch_cache), + proposer_lookahead, }); Ok(post) } diff --git a/consensus/types/presets/minimal/electra.yaml b/consensus/types/presets/minimal/electra.yaml index f99effe0f1..22e26e4025 100644 --- a/consensus/types/presets/minimal/electra.yaml +++ b/consensus/types/presets/minimal/electra.yaml @@ -32,10 +32,10 @@ MAX_ATTESTATIONS_ELECTRA: 8 # Execution # --------------------------------------------------------------- -# [customized] 2**2 (= 4) deposit requests -MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 4 -# [customized] 2**1 (= 2) withdrawal requests -MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 2 +# 2**13 (= 8,192) deposit requests +MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 +# 2**4 (= 16) withdrawal requests +MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # 2**1 (= 2) consolidation requests MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 286e4622f8..569d73820c 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -1,7 +1,13 @@ -use crate::context_deserialize; +use super::{ + AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, + Signature, SignedRoot, +}; use crate::slot_data::SlotData; +use crate::{context_deserialize, IndexedAttestation}; use crate::{test_utils::TestRandom, Hash256, Slot}; -use crate::{Checkpoint, ContextDeserialize, ForkName}; +use crate::{ + Checkpoint, ContextDeserialize, ForkName, IndexedAttestationBase, IndexedAttestationElectra, +}; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; @@ -12,11 +18,6 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -use super::{ - AggregateSignature, AttestationData, BitList, ChainSpec, 
Domain, EthSpec, Fork, SecretKey, - Signature, SignedRoot, -}; - #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), @@ -246,10 +247,17 @@ impl Attestation { attester_index: u64, ) -> Result { match self { - Self::Base(_) => Err(Error::IncorrectStateVariant), + Self::Base(attn) => attn.to_single_attestation_with_attester_index(attester_index), Self::Electra(attn) => attn.to_single_attestation_with_attester_index(attester_index), } } + + pub fn get_aggregation_bits(&self) -> Vec { + match self { + Self::Base(attn) => attn.get_aggregation_bits(), + Self::Electra(attn) => attn.get_aggregation_bits(), + } + } } impl AttestationRef<'_, E> { @@ -461,6 +469,26 @@ impl AttestationBase { ) -> Result, ssz::BitfieldError> { self.aggregation_bits.resize::() } + + pub fn get_aggregation_bits(&self) -> Vec { + self.aggregation_bits + .iter() + .enumerate() + .filter_map(|(index, bit)| if bit { Some(index as u64) } else { None }) + .collect() + } + + pub fn to_single_attestation_with_attester_index( + &self, + attester_index: u64, + ) -> Result { + Ok(SingleAttestation { + committee_index: self.data.index, + attester_index, + data: self.data.clone(), + signature: self.signature.clone(), + }) + } } impl SlotData for Attestation { @@ -483,7 +511,7 @@ pub enum AttestationOnDisk { } impl AttestationOnDisk { - pub fn to_ref(&self) -> AttestationRefOnDisk { + pub fn to_ref(&self) -> AttestationRefOnDisk<'_, E> { match self { AttestationOnDisk::Base(att) => AttestationRefOnDisk::Base(att), AttestationOnDisk::Electra(att) => AttestationRefOnDisk::Electra(att), @@ -596,6 +624,24 @@ pub struct SingleAttestation { pub signature: AggregateSignature, } +impl SingleAttestation { + pub fn to_indexed(&self, fork_name: ForkName) -> IndexedAttestation { + if fork_name.electra_enabled() { + IndexedAttestation::Electra(IndexedAttestationElectra { + attesting_indices: vec![self.attester_index].into(), + data: self.data.clone(), + signature: self.signature.clone(), + 
}) + } else { + IndexedAttestation::Base(IndexedAttestationBase { + attesting_indices: vec![self.attester_index].into(), + data: self.data.clone(), + signature: self.signature.clone(), + }) + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index 8fb5862f21..f671a43c9c 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -141,7 +141,7 @@ impl<'a, E: EthSpec> AttesterSlashingRef<'a, E> { } impl AttesterSlashing { - pub fn attestation_1(&self) -> IndexedAttestationRef { + pub fn attestation_1(&self) -> IndexedAttestationRef<'_, E> { match self { AttesterSlashing::Base(attester_slashing) => { IndexedAttestationRef::Base(&attester_slashing.attestation_1) @@ -152,7 +152,7 @@ impl AttesterSlashing { } } - pub fn attestation_2(&self) -> IndexedAttestationRef { + pub fn attestation_2(&self) -> IndexedAttestationRef<'_, E> { match self { AttesterSlashing::Base(attester_slashing) => { IndexedAttestationRef::Base(&attester_slashing.attestation_2) diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index ce41eddc17..31bc949583 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -172,6 +172,7 @@ pub enum Error { AggregatorNotInCommittee { aggregator_index: u64, }, + PleaseNotifyTheDevs(String), } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. 
@@ -544,6 +545,12 @@ where #[superstruct(only(Electra, Fulu))] pub pending_consolidations: List, + // Fulu + #[compare_fields(as_iter)] + #[test_random(default)] + #[superstruct(only(Fulu))] + pub proposer_lookahead: Vector, + // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] @@ -801,7 +808,7 @@ impl BeaconState { &self, slot: Slot, index: CommitteeIndex, - ) -> Result { + ) -> Result, Error> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; let cache = self.committee_cache(relative_epoch)?; @@ -816,7 +823,10 @@ impl BeaconState { /// Utilises the committee cache. /// /// Spec v0.12.1 - pub fn get_beacon_committees_at_slot(&self, slot: Slot) -> Result, Error> { + pub fn get_beacon_committees_at_slot( + &self, + slot: Slot, + ) -> Result>, Error> { let cache = self.committee_cache_at_slot(slot)?; cache.get_beacon_committees_at_slot(slot) } @@ -829,7 +839,7 @@ impl BeaconState { pub fn get_beacon_committees_at_epoch( &self, relative_epoch: RelativeEpoch, - ) -> Result, Error> { + ) -> Result>, Error> { let cache = self.committee_cache(relative_epoch)?; cache.get_all_beacon_committees() } @@ -948,6 +958,25 @@ impl BeaconState { } } + // Vec is just much easier to work with here + fn compute_proposer_indices( + &self, + epoch: Epoch, + seed: &[u8], + indices: &[usize], + spec: &ChainSpec, + ) -> Result, Error> { + epoch + .slot_iter(E::slots_per_epoch()) + .map(|slot| { + let mut preimage = seed.to_vec(); + preimage.append(&mut int_to_bytes8(slot.as_u64())); + let seed = hash(&preimage); + self.compute_proposer_index(indices, &seed, spec) + }) + .collect() + } + /// Fork-aware abstraction for the shuffling. /// /// In Electra and later, the random value is a 16-bit integer stored in a `u64`. 
@@ -990,7 +1019,9 @@ impl BeaconState { } /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. - pub fn latest_execution_payload_header(&self) -> Result, Error> { + pub fn latest_execution_payload_header( + &self, + ) -> Result, Error> { match self { BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRef::Bellatrix( @@ -1013,7 +1044,7 @@ impl BeaconState { pub fn latest_execution_payload_header_mut( &mut self, - ) -> Result, Error> { + ) -> Result, Error> { match self { BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRefMut::Bellatrix( @@ -1062,37 +1093,48 @@ impl BeaconState { /// Returns the beacon proposer index for the `slot` in `self.current_epoch()`. /// - /// Spec v0.12.1 + /// Spec v1.6.0-alpha.1 pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { // Proposer indices are only known for the current epoch, due to the dependence on the // effective balances of validators, which change at every epoch transition. let epoch = slot.epoch(E::slots_per_epoch()); + // TODO(EIP-7917): Explore allowing this function to be called with a slot one epoch in the future. 
if epoch != self.current_epoch() { return Err(Error::SlotOutOfBounds); } - let seed = self.get_beacon_proposer_seed(slot, spec)?; - let indices = self.get_active_validator_indices(epoch, spec)?; + if let Ok(proposer_lookahead) = self.proposer_lookahead() { + // Post-Fulu + let index = slot.as_usize().safe_rem(E::slots_per_epoch() as usize)?; + proposer_lookahead + .get(index) + .ok_or(Error::PleaseNotifyTheDevs(format!( + "Proposer lookahead out of bounds: {} for slot: {}", + index, slot + ))) + .map(|index| *index as usize) + } else { + // Pre-Fulu + let seed = self.get_beacon_proposer_seed(slot, spec)?; + let indices = self.get_active_validator_indices(epoch, spec)?; - self.compute_proposer_index(&indices, &seed, spec) + self.compute_proposer_index(&indices, &seed, spec) + } } - /// Returns the beacon proposer index for each `slot` in `self.current_epoch()`. + /// Returns the beacon proposer index for each `slot` in `epoch`. /// - /// The returned `Vec` contains one proposer index for each slot. For example, if - /// `state.current_epoch() == 1`, then `vec[0]` refers to slot `32` and `vec[1]` refers to slot - /// `33`. It will always be the case that `vec.len() == SLOTS_PER_EPOCH`. - pub fn get_beacon_proposer_indices(&self, spec: &ChainSpec) -> Result, Error> { + /// The returned `Vec` contains one proposer index for each slot in the epoch. + pub fn get_beacon_proposer_indices( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result, Error> { // Not using the cached validator indices since they are shuffled. 
- let indices = self.get_active_validator_indices(self.current_epoch(), spec)?; + let indices = self.get_active_validator_indices(epoch, spec)?; - self.current_epoch() - .slot_iter(E::slots_per_epoch()) - .map(|slot| { - let seed = self.get_beacon_proposer_seed(slot, spec)?; - self.compute_proposer_index(&indices, &seed, spec) - }) - .collect() + let preimage = self.get_seed(epoch, Domain::BeaconProposer, spec)?; + self.compute_proposer_indices(epoch, preimage.as_slice(), &indices, spec) } /// Compute the seed to use for the beacon proposer selection at the given `slot`. @@ -1676,7 +1718,7 @@ impl BeaconState { pub fn get_validator_cow( &mut self, validator_index: usize, - ) -> Result, Error> { + ) -> Result, Error> { self.validators_mut() .get_cow(validator_index) .ok_or(Error::UnknownValidator(validator_index)) diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 161f854157..e3fb339c87 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -159,7 +159,7 @@ impl CommitteeCache { &self, slot: Slot, index: CommitteeIndex, - ) -> Option { + ) -> Option> { if self.initialized_epoch.is_none() || !self.is_initialized_at(slot.epoch(self.slots_per_epoch)) || index >= self.committees_per_slot @@ -185,7 +185,10 @@ impl CommitteeCache { /// Get all the Beacon committees at a given `slot`. /// /// Committees are sorted by ascending index order 0..committees_per_slot - pub fn get_beacon_committees_at_slot(&self, slot: Slot) -> Result, Error> { + pub fn get_beacon_committees_at_slot( + &self, + slot: Slot, + ) -> Result>, Error> { if self.initialized_epoch.is_none() { return Err(Error::CommitteeCacheUninitialized(None)); } @@ -199,7 +202,7 @@ impl CommitteeCache { } /// Returns all committees for `self.initialized_epoch`. 
- pub fn get_all_beacon_committees(&self) -> Result, Error> { + pub fn get_all_beacon_committees(&self) -> Result>, Error> { let initialized_epoch = self .initialized_epoch .ok_or(Error::CommitteeCacheUninitialized(None))?; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 7b9950db91..b4fd5afe87 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -203,6 +203,8 @@ pub struct ChainSpec { pub data_column_sidecar_subnet_count: u64, pub samples_per_slot: u64, pub custody_requirement: u64, + pub validator_custody_requirement: u64, + pub balance_per_additional_custody_group: u64, /* * Networking @@ -243,7 +245,7 @@ pub struct ChainSpec { /* * Networking Fulu */ - max_blobs_per_block_fulu: u64, + blob_schedule: BlobSchedule, /* * Networking Derived @@ -653,19 +655,40 @@ impl ChainSpec { } } - /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`. + /// Return the value of `MAX_BLOBS_PER_BLOCK` for the given `epoch`. + /// NOTE: this function is *technically* not spec compliant, but + /// I'm told this is what the other clients are doing for `devnet-0`.. pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 { - self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch)) + match self.fulu_fork_epoch { + Some(fulu_epoch) if epoch >= fulu_epoch => self + .blob_schedule + .max_blobs_for_epoch(epoch) + .unwrap_or(self.max_blobs_per_block_electra), + _ => match self.electra_fork_epoch { + Some(electra_epoch) if epoch >= electra_epoch => self.max_blobs_per_block_electra, + _ => self.max_blobs_per_block, + }, + } } - /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. 
- pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 { - if fork_name.fulu_enabled() { - self.max_blobs_per_block_fulu - } else if fork_name.electra_enabled() { - self.max_blobs_per_block_electra + // TODO(EIP-7892): remove this once we have fork-version changes on BPO forks + pub fn max_blobs_per_block_within_fork(&self, fork_name: ForkName) -> u64 { + if !fork_name.fulu_enabled() { + if fork_name.electra_enabled() { + self.max_blobs_per_block_electra + } else { + self.max_blobs_per_block + } } else { - self.max_blobs_per_block + // Find the max blobs per block in the fork schedule + // This logic will need to be more complex once there are forks beyond Fulu + let mut max_blobs_per_block = self.max_blobs_per_block_electra; + for entry in &self.blob_schedule { + if entry.max_blobs_per_block > max_blobs_per_block { + max_blobs_per_block = entry.max_blobs_per_block; + } + } + max_blobs_per_block } } @@ -710,14 +733,6 @@ impl ChainSpec { Ok(std::cmp::max(custody_column_count, self.samples_per_slot)) } - pub fn custody_group_count(&self, is_supernode: bool) -> u64 { - if is_supernode { - self.number_of_custody_groups - } else { - self.custody_requirement - } - } - pub fn all_data_column_sidecar_subnets(&self) -> impl Iterator { (0..self.data_column_sidecar_subnet_count).map(DataColumnSubnetId::new) } @@ -954,6 +969,8 @@ impl ChainSpec { data_column_sidecar_subnet_count: 128, number_of_columns: 128, samples_per_slot: 8, + validator_custody_requirement: 8, + balance_per_additional_custody_group: 32000000000, /* * Network specific @@ -1002,7 +1019,7 @@ impl ChainSpec { /* * Networking Fulu specific */ - max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(), + blob_schedule: BlobSchedule::default(), /* * Application specific @@ -1288,6 +1305,8 @@ impl ChainSpec { data_column_sidecar_subnet_count: 128, number_of_columns: 128, samples_per_slot: 8, + validator_custody_requirement: 8, + balance_per_additional_custody_group: 32000000000, /* * Network 
specific @@ -1336,7 +1355,7 @@ impl ChainSpec { /* * Networking Fulu specific */ - max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(), + blob_schedule: BlobSchedule::default(), /* * Application specific @@ -1357,6 +1376,75 @@ impl Default for ChainSpec { } } +#[derive(arbitrary::Arbitrary, Serialize, Deserialize, Debug, PartialEq, Clone)] +#[serde(rename_all = "UPPERCASE")] +pub struct BPOFork { + epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block: u64, +} + +// A wrapper around a vector of BPOFork to ensure that the vector is reverse +// sorted by epoch. +#[derive(arbitrary::Arbitrary, Serialize, Debug, PartialEq, Clone)] +pub struct BlobSchedule(Vec); + +impl<'de> Deserialize<'de> for BlobSchedule { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let vec = Vec::::deserialize(deserializer)?; + Ok(BlobSchedule::new(vec)) + } +} + +impl BlobSchedule { + pub fn new(mut vec: Vec) -> Self { + // reverse sort by epoch + vec.sort_by(|a, b| b.epoch.cmp(&a.epoch)); + Self(vec) + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn max_blobs_for_epoch(&self, epoch: Epoch) -> Option { + self.0 + .iter() + .find(|entry| epoch >= entry.epoch) + .map(|entry| entry.max_blobs_per_block) + } + + pub const fn default() -> Self { + // TODO(EIP-7892): think about what the default should be + Self(vec![]) + } + + pub fn as_vec(&self) -> &Vec { + &self.0 + } +} + +impl<'a> IntoIterator for &'a BlobSchedule { + type Item = &'a BPOFork; + type IntoIter = std::slice::Iter<'a, BPOFork>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl IntoIterator for BlobSchedule { + type Item = BPOFork; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + /// Exact implementation of the *config* object from the Ethereum spec (YAML/JSON). 
/// /// Fields relevant to hard forks after Altair should be optional so that we can continue @@ -1557,9 +1645,15 @@ pub struct Config { #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] custody_requirement: u64, - #[serde(default = "default_max_blobs_per_block_fulu")] + #[serde(default = "BlobSchedule::default")] + #[serde(skip_serializing_if = "BlobSchedule::is_empty")] + blob_schedule: BlobSchedule, + #[serde(default = "default_validator_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] - max_blobs_per_block_fulu: u64, + validator_custody_requirement: u64, + #[serde(default = "default_balance_per_additional_custody_group")] + #[serde(with = "serde_utils::quoted_u64")] + balance_per_additional_custody_group: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1697,10 +1791,6 @@ const fn default_max_blobs_per_block_electra() -> u64 { 9 } -const fn default_max_blobs_per_block_fulu() -> u64 { - 12 -} - const fn default_attestation_propagation_slot_range() -> u64 { 32 } @@ -1729,6 +1819,14 @@ const fn default_samples_per_slot() -> u64 { 8 } +const fn default_validator_custody_requirement() -> u64 { + 8 +} + +const fn default_balance_per_additional_custody_group() -> u64 { + 32000000000 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::from_vec( @@ -1937,7 +2035,9 @@ impl Config { data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, samples_per_slot: spec.samples_per_slot, custody_requirement: spec.custody_requirement, - max_blobs_per_block_fulu: spec.max_blobs_per_block_fulu, + blob_schedule: spec.blob_schedule.clone(), + validator_custody_requirement: spec.validator_custody_requirement, + balance_per_additional_custody_group: spec.balance_per_additional_custody_group, } } @@ -2016,7 +2116,9 @@ impl Config { data_column_sidecar_subnet_count, samples_per_slot, 
custody_requirement, - max_blobs_per_block_fulu, + ref blob_schedule, + validator_custody_requirement, + balance_per_additional_custody_group, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -2100,7 +2202,9 @@ impl Config { data_column_sidecar_subnet_count, samples_per_slot, custody_requirement, - max_blobs_per_block_fulu, + blob_schedule: blob_schedule.clone(), + validator_custody_requirement, + balance_per_additional_custody_group, ..chain_spec.clone() }) @@ -2287,6 +2391,140 @@ mod yaml_tests { assert_eq!(from, yamlconfig); } + #[test] + fn blob_schedule_max_blobs_per_block() { + let spec_contents = r#" + PRESET_BASE: 'mainnet' + MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 384 + MIN_GENESIS_TIME: 1748264340 + GENESIS_FORK_VERSION: 0x10355025 + GENESIS_DELAY: 60 + SECONDS_PER_SLOT: 12 + SECONDS_PER_ETH1_BLOCK: 12 + MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 + SHARD_COMMITTEE_PERIOD: 256 + ETH1_FOLLOW_DISTANCE: 2048 + INACTIVITY_SCORE_BIAS: 4 + INACTIVITY_SCORE_RECOVERY_RATE: 16 + EJECTION_BALANCE: 16000000000 + MIN_PER_EPOCH_CHURN_LIMIT: 4 + CHURN_LIMIT_QUOTIENT: 65536 + MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + PROPOSER_SCORE_BOOST: 40 + REORG_HEAD_WEIGHT_THRESHOLD: 20 + REORG_PARENT_WEIGHT_THRESHOLD: 160 + REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + DEPOSIT_CHAIN_ID: 7042643276 + DEPOSIT_NETWORK_ID: 7042643276 + DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + + ALTAIR_FORK_VERSION: 0x20355025 + ALTAIR_FORK_EPOCH: 0 + BELLATRIX_FORK_VERSION: 0x30355025 + BELLATRIX_FORK_EPOCH: 0 + CAPELLA_FORK_VERSION: 0x40355025 + CAPELLA_FORK_EPOCH: 0 + DENEB_FORK_VERSION: 0x50355025 + DENEB_FORK_EPOCH: 64 + ELECTRA_FORK_VERSION: 0x60355025 + ELECTRA_FORK_EPOCH: 128 + FULU_FORK_VERSION: 0x70355025 + FULU_FORK_EPOCH: 256 + BLOB_SCHEDULE: + - EPOCH: 512 + MAX_BLOBS_PER_BLOCK: 12 + - EPOCH: 768 + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 1024 + MAX_BLOBS_PER_BLOCK: 18 + - EPOCH: 1280 + MAX_BLOBS_PER_BLOCK: 9 + - EPOCH: 1584 + MAX_BLOBS_PER_BLOCK: 20 + "#; + 
let config: Config = + serde_yaml::from_str(spec_contents).expect("error while deserializing"); + let spec = + ChainSpec::from_config::(&config).expect("error while creating spec"); + + // test out max_blobs_per_block(epoch) + assert_eq!( + spec.max_blobs_per_block(Epoch::new(64)), + default_max_blobs_per_block() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(127)), + default_max_blobs_per_block() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(128)), + default_max_blobs_per_block_electra() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(255)), + default_max_blobs_per_block_electra() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(256)), + default_max_blobs_per_block_electra() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(511)), + default_max_blobs_per_block_electra() + ); + assert_eq!(spec.max_blobs_per_block(Epoch::new(512)), 12); + assert_eq!(spec.max_blobs_per_block(Epoch::new(767)), 12); + assert_eq!(spec.max_blobs_per_block(Epoch::new(768)), 15); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1023)), 15); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1024)), 18); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1279)), 18); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1280)), 9); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1583)), 9); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1584)), 20); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(18446744073709551615)), + 20 + ); + + // blob schedule is reverse sorted by epoch + assert_eq!( + config.blob_schedule.as_vec(), + &vec![ + BPOFork { + epoch: Epoch::new(1584), + max_blobs_per_block: 20 + }, + BPOFork { + epoch: Epoch::new(1280), + max_blobs_per_block: 9 + }, + BPOFork { + epoch: Epoch::new(1024), + max_blobs_per_block: 18 + }, + BPOFork { + epoch: Epoch::new(768), + max_blobs_per_block: 15 + }, + BPOFork { + epoch: Epoch::new(512), + max_blobs_per_block: 12 + }, + ] + ); + + // test max_blobs_per_block_within_fork + assert_eq!( + 
spec.max_blobs_per_block_within_fork(ForkName::Deneb), + default_max_blobs_per_block() + ); + assert_eq!( + spec.max_blobs_per_block_within_fork(ForkName::Electra), + default_max_blobs_per_block_electra() + ); + assert_eq!(spec.max_blobs_per_block_within_fork(ForkName::Fulu), 20); + } + #[test] fn apply_to_spec() { let mut spec = ChainSpec::minimal(); diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 5ec2b28b2b..612ddb6eb8 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -6,12 +6,14 @@ use crate::{ SignedBeaconBlockHeader, Slot, }; use bls::Signature; +use context_deserialize::ContextDeserialize; use derivative::Derivative; use kzg::Error as KzgError; use kzg::{KzgCommitment, KzgProof}; use merkle_proof::verify_merkle_proof; use safe_arith::ArithError; -use serde::{Deserialize, Serialize}; +use serde::de::Error; +use serde::{Deserialize, Deserializer, Serialize}; use ssz::{DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::Error as SszError; @@ -26,12 +28,49 @@ pub type Cell = FixedVector::BytesPerCell>; pub type DataColumn = VariableList, ::MaxBlobCommitmentsPerBlock>; /// Identifies a set of data columns associated with a specific beacon block. 
-#[derive(Encode, Clone, Debug, PartialEq)] +#[derive(Encode, Clone, Debug, PartialEq, TreeHash)] pub struct DataColumnsByRootIdentifier { pub block_root: Hash256, pub columns: RuntimeVariableList, } +impl<'de> ContextDeserialize<'de, (ForkName, usize)> for DataColumnsByRootIdentifier { + fn context_deserialize(deserializer: D, context: (ForkName, usize)) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + block_root: Hash256, + columns: serde_json::Value, + } + + let helper = Helper::deserialize(deserializer)?; + Ok(Self { + block_root: helper.block_root, + columns: RuntimeVariableList::context_deserialize(helper.columns, context) + .map_err(Error::custom)?, + }) + } +} + +impl DataColumnsByRootIdentifier { + pub fn from_ssz_bytes(bytes: &[u8], num_columns: usize) -> Result { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_type::()?; + builder.register_anonymous_variable_length_item()?; + + let mut decoder = builder.build()?; + let block_root = decoder.decode_next()?; + let columns = decoder + .decode_next_with(|bytes| RuntimeVariableList::from_ssz_bytes(bytes, num_columns))?; + Ok(DataColumnsByRootIdentifier { + block_root, + columns, + }) + } +} + impl RuntimeVariableList { pub fn from_ssz_bytes_with_nested( bytes: &[u8], @@ -47,21 +86,7 @@ impl RuntimeVariableList { Some(max_len), )? 
.into_iter() - .map(|bytes| { - let mut builder = ssz::SszDecoderBuilder::new(&bytes); - builder.register_type::()?; - builder.register_anonymous_variable_length_item()?; - - let mut decoder = builder.build()?; - let block_root = decoder.decode_next()?; - let columns = decoder.decode_next_with(|bytes| { - RuntimeVariableList::from_ssz_bytes(bytes, num_columns) - })?; - Ok(DataColumnsByRootIdentifier { - block_root, - columns, - }) - }) + .map(|bytes| DataColumnsByRootIdentifier::from_ssz_bytes(&bytes, num_columns)) .collect::, _>>()?; Ok(RuntimeVariableList::from_vec(vec, max_len)) diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 6f1b3e6ce6..bc87a4bd80 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -118,6 +118,7 @@ pub trait EthSpec: type FieldElementsPerCell: Unsigned + Clone + Sync + Send + Debug + PartialEq; type FieldElementsPerExtBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; type KzgCommitmentsInclusionProofDepth: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type ProposerLookaheadSlots: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -378,6 +379,10 @@ pub trait EthSpec: fn kzg_commitments_inclusion_proof_depth() -> usize { Self::KzgCommitmentsInclusionProofDepth::to_usize() } + + fn proposer_lookahead_slots() -> usize { + Self::ProposerLookaheadSlots::to_usize() + } } /// Macro to inherit some type values from another EthSpec. 
@@ -429,6 +434,7 @@ impl EthSpec for MainnetEthSpec { type MaxCellsPerBlock = U33554432; type KzgCommitmentInclusionProofDepth = U17; type KzgCommitmentsInclusionProofDepth = U4; // inclusion of the whole list of commitments + type ProposerLookaheadSlots = U64; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -476,13 +482,12 @@ impl EthSpec for MinimalEthSpec { type KzgCommitmentInclusionProofDepth = U10; type PendingPartialWithdrawalsLimit = U64; type PendingConsolidationsLimit = U64; - type MaxDepositRequestsPerPayload = U4; - type MaxWithdrawalRequestsPerPayload = U2; type FieldElementsPerCell = U64; type FieldElementsPerExtBlob = U8192; type MaxCellsPerBlock = U33554432; type BytesPerCell = U2048; type KzgCommitmentsInclusionProofDepth = U4; + type ProposerLookaheadSlots = U16; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -509,7 +514,9 @@ impl EthSpec for MinimalEthSpec { MaxPendingDepositsPerEpoch, MaxConsolidationRequestsPerPayload, MaxAttesterSlashingsElectra, - MaxAttestationsElectra + MaxAttestationsElectra, + MaxDepositRequestsPerPayload, + MaxWithdrawalRequestsPerPayload }); fn default_spec() -> ChainSpec { @@ -576,6 +583,7 @@ impl EthSpec for GnosisEthSpec { type MaxCellsPerBlock = U33554432; type BytesPerCell = U2048; type KzgCommitmentsInclusionProofDepth = U4; + type ProposerLookaheadSlots = U32; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH fn default_spec() -> ChainSpec { ChainSpec::gnosis() @@ -592,9 +600,14 @@ mod test { use ssz_types::typenum::Unsigned; fn assert_valid_spec() { + let spec = E::default_spec(); E::kzg_commitments_tree_depth(); E::block_body_tree_depth(); 
assert!(E::MaxValidatorsPerSlot::to_i32() >= E::MaxValidatorsPerCommittee::to_i32()); + assert_eq!( + E::proposer_lookahead_slots(), + (spec.min_seed_lookahead.as_usize() + 1) * E::slots_per_epoch() as usize + ); } #[test] diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index 454c8b9e18..2a8899e203 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -6,6 +6,7 @@ use ssz::Decode; use ssz_types::Error; use std::ops::{Deref, Index, IndexMut}; use std::slice::SliceIndex; +use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, TreeHashType}; /// Emulates a SSZ `List`. /// @@ -241,6 +242,62 @@ where } } +impl TreeHash for RuntimeVariableList { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::List + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + unreachable!("List should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("List should never be packed.") + } + + fn tree_hash_root(&self) -> Hash256 { + let root = runtime_vec_tree_hash_root::(&self.vec, self.max_len); + + tree_hash::mix_in_length(&root, self.len()) + } +} + +// We can delete this once the upstream `vec_tree_hash_root` is modified to use a runtime max len. 
+pub fn runtime_vec_tree_hash_root(vec: &[T], max_len: usize) -> Hash256 +where + T: TreeHash, +{ + match T::tree_hash_type() { + TreeHashType::Basic => { + let mut hasher = + MerkleHasher::with_leaves(max_len.div_ceil(T::tree_hash_packing_factor())); + + for item in vec { + hasher + .write(&item.tree_hash_packed_encoding()) + .expect("ssz_types variable vec should not contain more elements than max"); + } + + hasher + .finish() + .expect("ssz_types variable vec should not have a remaining buffer") + } + TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { + let mut hasher = MerkleHasher::with_leaves(max_len); + + for item in vec { + hasher + .write(item.tree_hash_root().as_slice()) + .expect("ssz_types vec should not contain more elements than max"); + } + + hasher + .finish() + .expect("ssz_types vec should not have a remaining buffer") + } + } +} + #[cfg(test)] mod test { use super::*; diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 0391756047..66790a9641 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -118,7 +118,7 @@ impl Epoch { .as_u64()) } - pub fn slot_iter(&self, slots_per_epoch: u64) -> SlotIter { + pub fn slot_iter(&self, slots_per_epoch: u64) -> SlotIter<'_> { SlotIter { current_iteration: 0, epoch: self, diff --git a/consensus/types/src/sqlite.rs b/consensus/types/src/sqlite.rs index aa20666ae1..2f3f6d1c80 100644 --- a/consensus/types/src/sqlite.rs +++ b/consensus/types/src/sqlite.rs @@ -8,7 +8,7 @@ use rusqlite::{ macro_rules! 
impl_to_from_sql { ($type:ty) => { impl ToSql for $type { - fn to_sql(&self) -> Result { + fn to_sql(&self) -> Result, Error> { let val_i64 = i64::try_from(self.as_u64()) .map_err(|e| Error::ToSqlConversionFailure(Box::new(e)))?; Ok(ToSqlOutput::from(val_i64)) diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index f38c28d8b0..f45e4146b7 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -3,8 +3,7 @@ use crate::cli::DatabaseManager; use crate::cli::Migrate; use crate::cli::PruneStates; use beacon_chain::{ - builder::Witness, eth1_chain::CachingEth1Backend, schema_change::migrate_schema, - slot_clock::SystemTimeSlotClock, + builder::Witness, schema_change::migrate_schema, slot_clock::SystemTimeSlotClock, }; use beacon_node::{get_data_dir, ClientConfig}; use clap::ArgMatches; @@ -301,7 +300,6 @@ fn parse_migrate_config(migrate_config: &Migrate) -> Result( migrate_config: MigrateConfig, client_config: ClientConfig, - mut genesis_state: BeaconState, runtime_context: &RuntimeContext, ) -> Result<(), Error> { let spec = runtime_context.eth2_config.spec.clone(); @@ -329,13 +327,7 @@ pub fn migrate_db( "Migrating database schema" ); - let genesis_state_root = genesis_state.canonical_root()?; - migrate_schema::, _, _, _>>( - db, - Some(genesis_state_root), - from, - to, - ) + migrate_schema::>(db, from, to) } pub fn prune_payloads( @@ -487,8 +479,7 @@ pub fn run( match &db_manager_config.subcommand { cli::DatabaseManagerSubcommand::Migrate(migrate_config) => { let migrate_config = parse_migrate_config(migrate_config)?; - let genesis_state = get_genesis_state()?; - migrate_db(migrate_config, client_config, genesis_state, &context).map_err(format_err) + migrate_db(migrate_config, client_config, &context).map_err(format_err) } cli::DatabaseManagerSubcommand::Inspect(inspect_config) => { let inspect_config = parse_inspect_config(inspect_config)?; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 
cc17f638fd..fdda1696b1 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -33,6 +33,8 @@ slasher-redb = ["slasher/redb"] beacon-node-leveldb = ["store/leveldb"] # Supports beacon node redb backend. beacon-node-redb = ["store/redb"] +# Supports console subscriber for debugging +console-subscriber = ["console-subscriber/default"] # Deprecated. This is now enabled by default on non windows targets. jemalloc = [] @@ -45,6 +47,7 @@ bls = { workspace = true } boot_node = { path = "../boot_node" } clap = { workspace = true } clap_utils = { workspace = true } +console-subscriber = { workspace = true, optional = true } database_manager = { path = "../database_manager" } directory = { workspace = true } environment = { workspace = true } @@ -77,7 +80,6 @@ malloc_utils = { workspace = true } [dev-dependencies] beacon_node_fallback = { workspace = true } beacon_processor = { workspace = true } -eth1 = { workspace = true } eth2 = { workspace = true } initialized_validators = { workspace = true } lighthouse_network = { workspace = true } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 7ddf04db01..bbd8f764e7 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -663,6 +663,12 @@ fn run( logging_layers.push(MetricsLayer.boxed()); + #[cfg(feature = "console-subscriber")] + { + let console_layer = console_subscriber::spawn(); + logging_layers.push(console_layer.boxed()); + } + let logging_result = tracing_subscriber::registry() .with(logging_layers) .try_init(); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index ea4716c010..26b6c8ff0e 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -8,7 +8,6 @@ use beacon_node::{ beacon_chain::store::config::DatabaseBackend as BeaconNodeBackend, ClientConfig as Config, }; use beacon_processor::BeaconProcessorConfig; -use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; use std::io::{Read, Write}; @@ -115,11 
+114,6 @@ fn staking_flag() { .run_with_zero_port() .with_config(|config| { assert!(config.http_api.enabled); - assert!(config.sync_eth1_chain); - assert_eq!( - config.eth1.endpoint.get_endpoint().to_string(), - DEFAULT_EXECUTION_ENDPOINT - ); }); } @@ -398,51 +392,24 @@ fn genesis_backfill_with_historic_flag() { // Tests for Eth1 flags. // DEPRECATED but should not crash #[test] -fn dummy_eth1_flag() { +fn eth1_blocks_per_log_query_flag() { CommandLineTest::new() - .flag("dummy-eth1", None) + .flag("eth1-blocks-per-log-query", Some("500")) .run_with_zero_port(); } // DEPRECATED but should not crash #[test] -fn eth1_flag() { - CommandLineTest::new() - .flag("eth1", None) - .run_with_zero_port() - .with_config(|config| assert!(config.sync_eth1_chain)); -} -#[test] -fn eth1_blocks_per_log_query_flag() { - CommandLineTest::new() - .flag("eth1-blocks-per-log-query", Some("500")) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.eth1.blocks_per_log_query, 500)); -} -#[test] fn eth1_purge_cache_flag() { CommandLineTest::new() .flag("eth1-purge-cache", None) - .run_with_zero_port() - .with_config(|config| assert!(config.eth1.purge_cache)); -} -#[test] -fn eth1_cache_follow_distance_default() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| { - assert_eq!(config.eth1.cache_follow_distance, None); - assert_eq!(config.eth1.cache_follow_distance(), 3 * 2048 / 4); - }); + .run_with_zero_port(); } +// DEPRECATED but should not crash #[test] fn eth1_cache_follow_distance_manual() { CommandLineTest::new() .flag("eth1-cache-follow-distance", Some("128")) - .run_with_zero_port() - .with_config(|config| { - assert_eq!(config.eth1.cache_follow_distance, Some(128)); - assert_eq!(config.eth1.cache_follow_distance(), 128); - }); + .run_with_zero_port(); } // Tests for Bellatrix flags. 
@@ -755,8 +722,6 @@ fn test_builder_disable_ssz_flag() { } fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { - use sensitive_url::SensitiveUrl; - let dir = TempDir::new().expect("Unable to create temporary directory"); let execution_endpoint = "http://meow.cats"; let jwt_file = "jwt-file"; @@ -772,15 +737,6 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl let el_config = config.execution_layer.as_ref().unwrap(); assert_eq!(el_config.jwt_id, Some(id.to_string())); assert_eq!(el_config.jwt_version, Some(version.to_string())); - assert_eq!( - config.eth1.endpoint, - Eth1Endpoint::Auth { - endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), - jwt_path: dir.path().join(jwt_file), - jwt_id: Some(id.to_string()), - jwt_version: Some(version.to_string()), - } - ); }); } #[test] @@ -1927,22 +1883,43 @@ fn hdiff_buffer_cache_size_flag() { .flag("hdiff-buffer-cache-size", Some("1")) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.store.hdiff_buffer_cache_size.get(), 1); + assert_eq!(config.store.cold_hdiff_buffer_cache_size.get(), 1); }); } #[test] fn hdiff_buffer_cache_size_default() { - use beacon_node::beacon_chain::store::config::DEFAULT_HDIFF_BUFFER_CACHE_SIZE; + use beacon_node::beacon_chain::store::config::DEFAULT_COLD_HDIFF_BUFFER_CACHE_SIZE; CommandLineTest::new() .run_with_zero_port() .with_config(|config| { assert_eq!( - config.store.hdiff_buffer_cache_size, - DEFAULT_HDIFF_BUFFER_CACHE_SIZE + config.store.cold_hdiff_buffer_cache_size, + DEFAULT_COLD_HDIFF_BUFFER_CACHE_SIZE ); }); } #[test] +fn hot_hdiff_buffer_cache_size_default() { + use beacon_node::beacon_chain::store::config::DEFAULT_HOT_HDIFF_BUFFER_CACHE_SIZE; + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.store.hot_hdiff_buffer_cache_size, + DEFAULT_HOT_HDIFF_BUFFER_CACHE_SIZE + ); + }); +} +#[test] +fn hot_hdiff_buffer_cache_size_flag() { + 
CommandLineTest::new() + .flag("hot-hdiff-buffer-cache-size", Some("3")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.store.hot_hdiff_buffer_cache_size.get(), 3); + }); +} +#[test] fn auto_compact_db_flag() { CommandLineTest::new() .flag("auto-compact-db", Some("false")) @@ -2499,26 +2476,8 @@ fn logfile_format_flag() { ) }); } -#[test] -fn sync_eth1_chain_default() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| assert!(config.sync_eth1_chain)); -} - -#[test] -fn sync_eth1_chain_execution_endpoints_flag() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new_with_no_execution_endpoint() - .flag("execution-endpoints", Some("http://localhost:8551/")) - .flag( - "execution-jwt", - dir.path().join("jwt-file").as_os_str().to_str(), - ) - .run_with_zero_port() - .with_config(|config| assert!(config.sync_eth1_chain)); -} +// DEPRECATED but should not crash. #[test] fn sync_eth1_chain_disable_deposit_contract_sync_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); @@ -2529,8 +2488,7 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { "execution-jwt", dir.path().join("jwt-file").as_os_str().to_str(), ) - .run_with_zero_port() - .with_config(|config| assert!(!config.sync_eth1_chain)); + .run_with_zero_port(); } #[test] @@ -2679,6 +2637,16 @@ fn invalid_gossip_verified_blocks_path() { }); } +#[test] +fn advertise_false_custody_group_count() { + CommandLineTest::new() + .flag("advertise-false-custody-group-count", Some("64")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.network.advertise_false_custody_group_count, Some(64)) + }); +} + #[test] fn beacon_processor() { CommandLineTest::new() diff --git a/scripts/ci/check-success-job.sh b/scripts/ci/check-success-job.sh index dfa5c03257..2eee35f69e 100755 --- a/scripts/ci/check-success-job.sh +++ b/scripts/ci/check-success-job.sh @@ -5,8 +5,13 @@ set -euf -o pipefail 
YAML=$1 SUCCESS_JOB=$2 +EXCLUDE_JOBS_REGEX=${3:-} + +yq '... comments="" | .jobs | map(. | key) | .[]' < "$YAML" | + grep -v "$SUCCESS_JOB" | + { [ -n "$EXCLUDE_JOBS_REGEX" ] && grep -Ev "$EXCLUDE_JOBS_REGEX" || cat; } | + sort > all_jobs.txt -yq '... comments="" | .jobs | map(. | key) | .[]' < "$YAML" | grep -v "$SUCCESS_JOB" | sort > all_jobs.txt yq "... comments=\"\" | .jobs.$SUCCESS_JOB.needs[]" < "$YAML" | grep -v "$SUCCESS_JOB" | sort > dep_jobs.txt diff all_jobs.txt dep_jobs.txt || (echo "COMPLETENESS CHECK FAILED" && exit 1) rm all_jobs.txt dep_jobs.txt diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index e671340afb..05f1c5a54c 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -8,7 +8,7 @@ participants: - --target-peers=3 count: 4 network_params: - deneb_fork_epoch: 0 + electra_fork_epoch: 0 seconds_per_slot: 3 global_log_level: debug snooper_enabled: false diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml index 628b2696a5..c896b11330 100644 --- a/scripts/local_testnet/network_params_das.yaml +++ b/scripts/local_testnet/network_params_das.yaml @@ -1,7 +1,8 @@ participants: - cl_type: lighthouse cl_image: lighthouse:local - el_image: ethpandaops/geth:marius-engine-getblobs-v2 + el_type: geth + el_image: ethpandaops/geth:fusaka-devnet-1 cl_extra_params: - --subscribe-all-data-column-subnets - --subscribe-all-subnets @@ -11,7 +12,8 @@ participants: count: 2 - cl_type: lighthouse cl_image: lighthouse:local - el_image: ethpandaops/geth:marius-engine-getblobs-v2 + el_type: geth + el_image: ethpandaops/geth:fusaka-devnet-1 cl_extra_params: # Note: useful for testing range sync (only produce block if the node is in sync to prevent forking) - --sync-tolerance-epochs=0 diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 1f15688693..442e6fd98d 
100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -13,10 +13,12 @@ BUILD_IMAGE=true BUILDER_PROPOSALS=false CI=false KEEP_ENCLAVE=false +RUN_ASSERTOOR_TESTS=false # Get options -while getopts "e:b:n:phck" flag; do +while getopts "e:b:n:phcak" flag; do case "${flag}" in + a) RUN_ASSERTOOR_TESTS=true;; e) ENCLAVE_NAME=${OPTARG};; b) BUILD_IMAGE=${OPTARG};; n) NETWORK_PARAMS_FILE=${OPTARG};; @@ -34,6 +36,7 @@ while getopts "e:b:n:phck" flag; do echo " -n: kurtosis network params file path default: $NETWORK_PARAMS_FILE" echo " -p: enable builder proposals" echo " -c: CI mode, run without other additional services like Grafana and Dora explorer" + echo " -a: run Assertoor tests" echo " -k: keeping enclave to allow starting the testnet without destroying the existing one" echo " -h: this help" exit @@ -63,15 +66,22 @@ if [ "$BUILDER_PROPOSALS" = true ]; then fi if [ "$CI" = true ]; then - # TODO: run assertoor tests yq eval '.additional_services = []' -i $NETWORK_PARAMS_FILE echo "Running without additional services (CI mode)." fi +if [ "$RUN_ASSERTOOR_TESTS" = true ]; then + yq eval '.additional_services += ["assertoor"] | .additional_services |= unique' -i $NETWORK_PARAMS_FILE + # The available tests can be found in the `assertoor_params` section: + # https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration + yq eval '.assertoor_params = {"run_stability_check": true, "run_block_proposal_check": true, "run_transaction_test": true, "run_blob_transaction_test": true}' -i $NETWORK_PARAMS_FILE + echo "Assertoor has been added to $NETWORK_PARAMS_FILE." +fi + if [ "$BUILD_IMAGE" = true ]; then echo "Building Lighthouse Docker image." ROOT_DIR="$SCRIPT_DIR/../.." 
- docker build --build-arg FEATURES=portable -f $ROOT_DIR/Dockerfile -t $LH_IMAGE_NAME $ROOT_DIR + docker build --build-arg FEATURES=portable,spec-minimal -f $ROOT_DIR/Dockerfile -t $LH_IMAGE_NAME $ROOT_DIR else echo "Not rebuilding Lighthouse Docker image." fi diff --git a/scripts/local_testnet/stop_local_testnet.sh b/scripts/local_testnet/stop_local_testnet.sh index 6af1989e9f..b90a891154 100755 --- a/scripts/local_testnet/stop_local_testnet.sh +++ b/scripts/local_testnet/stop_local_testnet.sh @@ -6,10 +6,21 @@ ENCLAVE_NAME=${1:-local-testnet} LOGS_PATH=$SCRIPT_DIR/logs LOGS_SUBDIR=$LOGS_PATH/$ENCLAVE_NAME +# Extract the service names of Lighthouse beacon nodes that start with "cl-". +services=$(kurtosis enclave inspect "$ENCLAVE_NAME" | awk '/^=+ User Services =+$/ { in_section=1; next } + /^=+/ { in_section=0 } + in_section && /^[0-9a-f]{12}/ { print $2 }' | grep '^cl-') + +# Store logs (including dependency logs) to Kurtosis Files Artifacts. These are downloaded locally by `kurtosis enclave dump`. +for service in $services; do + kurtosis files storeservice --name "$service-logs" "$ENCLAVE_NAME" "$service" /data/lighthouse/beacon-data/beacon/logs/ +done + # Delete existing logs directory and make sure parent directory exists. rm -rf $LOGS_SUBDIR && mkdir -p $LOGS_PATH kurtosis enclave dump $ENCLAVE_NAME $LOGS_SUBDIR echo "Local testnet logs stored to $LOGS_SUBDIR." +echo "The lighthouse beacon nodes' logs (including dependency logs) can be found in $LOGS_SUBDIR/files/cl-*-lighthouse-geth-logs." kurtosis enclave rm -f $ENCLAVE_NAME kurtosis engine stop diff --git a/scripts/tests/checkpoint-sync-config-devnet.yaml b/scripts/tests/checkpoint-sync-config-devnet.yaml new file mode 100644 index 0000000000..de3020a884 --- /dev/null +++ b/scripts/tests/checkpoint-sync-config-devnet.yaml @@ -0,0 +1,20 @@ +# Kurtosis config file to checkpoint sync to a running devnet supported by ethPandaOps and `ethereum-package`. 
+participants: + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth + el_image: ethpandaops/geth:fusaka-devnet-1 + supernode: true + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth + el_image: ethpandaops/geth:fusaka-devnet-1 + supernode: false + +checkpoint_sync_enabled: true +checkpoint_sync_url: "https://checkpoint-sync.fusaka-devnet-1.ethpandaops.io" + +global_log_level: debug + +network_params: + network: fusaka-devnet-1 diff --git a/scripts/tests/checkpoint-sync-config-sepolia.yaml b/scripts/tests/checkpoint-sync-config-sepolia.yaml new file mode 100644 index 0000000000..289dee7869 --- /dev/null +++ b/scripts/tests/checkpoint-sync-config-sepolia.yaml @@ -0,0 +1,20 @@ +# Kurtosis config file to checkpoint sync to a live network (Sepolia). +participants: + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth + el_image: ethereum/client-go:latest + supernode: true + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth + el_image: ethereum/client-go:latest + supernode: false + +checkpoint_sync_enabled: true +checkpoint_sync_url: "https://checkpoint-sync.sepolia.ethpandaops.io" + +global_log_level: debug + +network_params: + network: sepolia diff --git a/scripts/tests/checkpoint-sync.sh b/scripts/tests/checkpoint-sync.sh new file mode 100755 index 0000000000..a170d1e94d --- /dev/null +++ b/scripts/tests/checkpoint-sync.sh @@ -0,0 +1,127 @@ +#!/usr/bin/env bash +# +# Checkpoint sync to a live network. +# +# Start with checkpoint sync and let the node(s) sync to head and perform backfill for a specified number of slots. +# This test ensures we cover all sync components (range, lookup, backfill) and measures sync speed +# to detect any performance regressions. 
+SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + +ENCLAVE_NAME=${1:-sync-testnet} +CONFIG=${2:-$SCRIPT_DIR/checkpoint-sync-config-sepolia.yaml} + +# Test configuration +# ------------------------------------------------------ +# Interval for polling the /lighthouse/syncing endpoint for sync status +POLL_INTERVAL_SECS=5 +# Target number of slots to backfill to complete this test. +TARGET_BACKFILL_SLOTS=1024 +# Timeout for this test, if the node(s) fail to backfill `TARGET_BACKFILL_SLOTS` slots, fail the test. +TIMEOUT_MINS=10 +TIMEOUT_SECS=$((TIMEOUT_MINS * 60)) +# ------------------------------------------------------ + +# Polls a single node's sync status +poll_node() { + local node_type=$1 + local url=${node_urls[$node_type]} + + response=$(curl -s "${url}/lighthouse/syncing") + + if [ -z "$response" ] || [ "$response" = "null" ]; then + echo "${node_type} status: No response or null response" + return + fi + + # Print syncing status + sync_state=$(echo "$response" | jq -r 'if (.data | type) == "object" then "object" else "string" end' 2>/dev/null) + + if [ "$sync_state" = "object" ]; then + status=$(echo "$response" | jq -r '.data | keys[0] // "Unknown"') + fields=$(echo "$response" | jq -r ".data.${status} | to_entries | map(\"\(.key): \(.value)\") | join(\", \")") + echo "${node_type} status: ${status}, ${fields}" + else + status=$(echo "$response" | jq -r '.data' 2>/dev/null) + echo "${node_type} status: ${status:-Unknown}" + fi + + # Check for completion criteria + if [ "$status" = "BackFillSyncing" ]; then + completed=$(echo "$response" | jq -r ".data.${status}.completed // 0") + if [ "$completed" -ge "$TARGET_BACKFILL_SLOTS" ]; then + mark_node_complete "$node_type" + fi + fi + # For other states (Synced, SyncingFinalized, SyncingHead, SyncTransition, Stalled, Unknown), + # we continue polling + # NOTE: there is a bug where Lighthouse briefly switch to "Synced" before completing backfilling. 
We ignore this state + # as it's unlikely a node is fully synced without going through backfilling `TARGET_BACKFILL_SLOTS` slots (only + # possible on a new network). +} + +# Marks a node as complete and record time +mark_node_complete() { + local node_type=$1 + if [ "${node_completed[$node_type]}" = false ]; then + node_completed[$node_type]=true + node_complete_time[$node_type]=$(date +%s) + echo "${node_type} completed backfill in $((node_complete_time[$node_type] - start_time)) seconds" + fi +} + +exit_and_dump_logs() { + local exit_code=$1 + echo "Shutting down..." + $SCRIPT_DIR/../local_testnet/stop_local_testnet.sh $ENCLAVE_NAME + echo "Test completed with exit code $exit_code." + exit $exit_code +} + +# Start the nodes +$SCRIPT_DIR/../local_testnet/start_local_testnet.sh -e $ENCLAVE_NAME -b false -n $CONFIG +if [ $? -ne 0 ]; then + echo "Failed to start local testnet" + exit_and_dump_logs 1 +fi + +start_time=$(date +%s) + +# Get all beacon API URLs +supernode_url=$(kurtosis port print $ENCLAVE_NAME cl-1-lighthouse-geth http) +fullnode_url=$(kurtosis port print $ENCLAVE_NAME cl-2-lighthouse-geth http) + +# Initialize statuses +declare -A node_completed +declare -A node_complete_time +declare -A node_urls + +node_urls["supernode"]="$supernode_url" +node_urls["fullnode"]="$fullnode_url" +node_completed["supernode"]=false +node_completed["fullnode"]=false + +echo "Polling sync status until backfill reaches ${TARGET_BACKFILL_SLOTS} slots or timeout of ${TIMEOUT_MINS} mins" + +while [ "${node_completed[supernode]}" = false ] || [ "${node_completed[fullnode]}" = false ]; do + current_time=$(date +%s) + elapsed=$((current_time - start_time)) + + if [ "$elapsed" -ge "$TIMEOUT_SECS" ]; then + echo "ERROR: Nodes timed out syncing after ${TIMEOUT_MINS} minutes. Exiting." 
+ exit_and_dump_logs 1 + fi + + # Poll each node that hasn't completed yet + for node in "supernode" "fullnode"; do + if [ "${node_completed[$node]}" = false ]; then + poll_node "$node" + fi + done + + sleep $POLL_INTERVAL_SECS +done + +echo "Sync test complete! Both supernode and fullnode have synced to HEAD and backfilled ${TARGET_BACKFILL_SLOTS} slots." +echo "Supernode time: $((node_complete_time[supernode] - start_time)) seconds" +echo "Fullnode time: $((node_complete_time[fullnode] - start_time)) seconds" +exit_and_dump_logs 0 \ No newline at end of file diff --git a/scripts/tests/genesis-sync-config-electra.yaml b/scripts/tests/genesis-sync-config-electra.yaml new file mode 100644 index 0000000000..153f754c94 --- /dev/null +++ b/scripts/tests/genesis-sync-config-electra.yaml @@ -0,0 +1,22 @@ +# Kurtosis config file for testing sync on a local devnet. +participants: + - cl_type: lighthouse + cl_image: lighthouse:local + count: 2 + # nodes without validators, used for testing sync. + - cl_type: lighthouse + cl_image: lighthouse:local + supernode: true # no supernode in Electra, this is for future proof + validator_count: 0 + - cl_type: lighthouse + cl_image: lighthouse:local + supernode: false + validator_count: 0 +network_params: + seconds_per_slot: 6 + electra_fork_epoch: 0 + preset: "minimal" +additional_services: + - tx_fuzz + - spamoor +global_log_level: debug diff --git a/scripts/tests/genesis-sync-config-fulu.yaml b/scripts/tests/genesis-sync-config-fulu.yaml new file mode 100644 index 0000000000..91aa4d1ffd --- /dev/null +++ b/scripts/tests/genesis-sync-config-fulu.yaml @@ -0,0 +1,29 @@ +# Kurtosis config file for testing sync on a local devnet. +participants: + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth + el_image: ethpandaops/geth:fusaka-devnet-1 + count: 2 + # nodes without validators, used for testing sync. 
+ - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth + el_image: ethpandaops/geth:fusaka-devnet-1 + supernode: true + validator_count: 0 + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth + el_image: ethpandaops/geth:fusaka-devnet-1 + supernode: false + validator_count: 0 +network_params: + seconds_per_slot: 6 + electra_fork_epoch: 0 + fulu_fork_epoch: 1 + preset: "minimal" +additional_services: + - tx_fuzz + - spamoor +global_log_level: debug diff --git a/scripts/tests/genesis-sync.sh b/scripts/tests/genesis-sync.sh new file mode 100755 index 0000000000..39628c9e73 --- /dev/null +++ b/scripts/tests/genesis-sync.sh @@ -0,0 +1,151 @@ +#!/usr/bin/env bash +# +# Genesis sync test on a local network. +# +# Start a local testnet, shut down non-validator nodes for a period, then restart them +# and monitor their sync progress from genesis to head. +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + +ENCLAVE_NAME=${1:-genesis-sync-testnet} +CONFIG=${2:-$SCRIPT_DIR/genesis-sync-config-electra.yaml} +FORK_TYPE=${3:-electra} # electra or fulu +OFFLINE_DURATION_SECS=${4:-120} # stopped duration of non validating nodes + +# Test configuration +# ------------------------------------------------------ +# Interval for polling the /lighthouse/syncing endpoint for sync status +# Reduce the polling time so that some progress can be seen +POLL_INTERVAL_SECS=0.5 +# Timeout for this test, if the nodes fail to sync, fail the test. 
+TIMEOUT_MINS=5 +TIMEOUT_SECS=$((TIMEOUT_MINS * 60)) +# ------------------------------------------------------ + +echo "Starting genesis sync test with:" +echo " Fork: $FORK_TYPE" +echo " Offline duration: ${OFFLINE_DURATION_SECS}s" + +# Polls a node's sync status +poll_node() { + local node_type=$1 + local url=${node_urls[$node_type]} + + response=$(curl -s "${url}/lighthouse/syncing" 2>/dev/null) + + if [ -z "$response" ] || [ "$response" = "null" ]; then + echo "${node_type} status: No response or null response" + return + fi + + # Print syncing status + sync_state=$(echo "$response" | jq -r 'if (.data | type) == "object" then "object" else "string" end' 2>/dev/null) + + if [ "$sync_state" = "object" ]; then + status=$(echo "$response" | jq -r '.data | keys[0] // "Unknown"') + fields=$(echo "$response" | jq -r ".data.${status} | to_entries | map(\"\(.key): \(.value)\") | join(\", \")") + echo "${node_type} status: ${status}, ${fields}" + else + status=$(echo "$response" | jq -r '.data' 2>/dev/null) + echo "${node_type} status: ${status:-Unknown}" + + # The test is complete when the node is synced + if [ "$status" = "Synced" ]; then + mark_node_complete "$node_type" + fi + fi +} + +# Marks a node as complete and record time +mark_node_complete() { + local node_type=$1 + if [ "${node_completed[$node_type]}" = false ]; then + node_completed[$node_type]=true + node_complete_time[$node_type]=$(date +%s) + echo "${node_type} completed sync in $((node_complete_time[$node_type] - sync_start_time)) seconds" + fi +} + +exit_and_dump_logs() { + local exit_code=$1 + echo "Shutting down..." + $SCRIPT_DIR/../local_testnet/stop_local_testnet.sh $ENCLAVE_NAME + echo "Test completed with exit code $exit_code." + exit $exit_code +} + +# Start the nodes +$SCRIPT_DIR/../local_testnet/start_local_testnet.sh -e $ENCLAVE_NAME -b false -n $CONFIG +if [ $? 
-ne 0 ]; then + echo "Failed to start local testnet" + exit_and_dump_logs 1 +fi + +# Wait for 10s before stopping non-validating nodes +sleep 10 + +# These are non validating nodes +supernode="cl-3-lighthouse-geth" +fullnode="cl-4-lighthouse-geth" + +# Stop the non-validator nodes +kurtosis service stop $ENCLAVE_NAME $supernode +kurtosis service stop $ENCLAVE_NAME $fullnode + +echo "Non-validator nodes stopped. Waiting ${OFFLINE_DURATION_SECS} seconds..." + +# Display the time every 10s when the nodes are stopped +remaining_time=$OFFLINE_DURATION_SECS +while [ $remaining_time -gt 0 ]; do + sleep 10 + remaining_time=$((remaining_time - 10)) + echo "Nodes are stopped for $((OFFLINE_DURATION_SECS - remaining_time))s, ${remaining_time}s remains..." +done + +echo "Resuming non-validator nodes..." + +# Resume the non validating nodes +kurtosis service start $ENCLAVE_NAME $supernode +kurtosis service start $ENCLAVE_NAME $fullnode + +# The time at which syncing starts after the node was stopped +sync_start_time=$(date +%s) + +# Get beacon API URLs for non validating nodes for query +supernode_url=$(kurtosis port print $ENCLAVE_NAME $supernode http) +fullnode_url=$(kurtosis port print $ENCLAVE_NAME $fullnode http) + +# Initialize statuses +declare -A node_completed +declare -A node_complete_time +declare -A node_urls + +node_urls["supernode"]="$supernode_url" +node_urls["fullnode"]="$fullnode_url" +node_completed["supernode"]=false +node_completed["fullnode"]=false + +echo "Polling sync status until nodes are synced or timeout of ${TIMEOUT_MINS} mins" + +while [ "${node_completed[supernode]}" = false ] || [ "${node_completed[fullnode]}" = false ]; do + current_time=$(date +%s) + elapsed=$((current_time - sync_start_time)) + + if [ "$elapsed" -ge "$TIMEOUT_SECS" ]; then + echo "ERROR: Nodes timed out syncing after ${TIMEOUT_MINS} minutes. Exiting." 
+ exit_and_dump_logs 1 + fi + + # Poll each node that hasn't completed yet + for node in "supernode" "fullnode"; do + if [ "${node_completed[$node]}" = false ]; then + poll_node "$node" + fi + done + + sleep $POLL_INTERVAL_SECS +done + +echo "Genesis sync test complete! Both supernode and fullnode have synced successfully." +echo "Supernode time: $((node_complete_time[supernode] - sync_start_time)) seconds" +echo "Fullnode time: $((node_complete_time[fullnode] - sync_start_time)) seconds" +exit_and_dump_logs 0 \ No newline at end of file diff --git a/scripts/tests/network_params.yaml b/scripts/tests/network_params.yaml index 21114df0e8..0fda1aa34b 100644 --- a/scripts/tests/network_params.yaml +++ b/scripts/tests/network_params.yaml @@ -8,7 +8,7 @@ participants: - --target-peers=3 count: 4 network_params: - deneb_fork_epoch: 0 + electra_fork_epoch: 0 seconds_per_slot: 3 num_validator_keys_per_node: 20 global_log_level: debug diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 77ddceb85f..c61b9b5414 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -147,7 +147,7 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch; - fn select_db(db: &SlasherDB) -> &Database; + fn select_db(db: &SlasherDB) -> &Database<'_>; fn load( db: &SlasherDB, @@ -290,7 +290,7 @@ impl TargetArrayChunk for MinTargetChunk { start_epoch / chunk_size * chunk_size - 1 } - fn select_db(db: &SlasherDB) -> &Database { + fn select_db(db: &SlasherDB) -> &Database<'_> { &db.databases.min_targets_db } } @@ -389,7 +389,7 @@ impl TargetArrayChunk for MaxTargetChunk { (start_epoch / chunk_size + 1) * chunk_size } - fn select_db(db: &SlasherDB) -> &Database { + fn select_db(db: &SlasherDB) -> &Database<'_> { &db.databases.max_targets_db } } diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 071109e00c..d5e0ed5d24 100644 --- a/slasher/src/database.rs +++ 
b/slasher/src/database.rs @@ -331,7 +331,7 @@ impl SlasherDB { Ok(db) } - pub fn begin_rw_txn(&self) -> Result { + pub fn begin_rw_txn(&self) -> Result, Error> { self.env.begin_rw_txn() } diff --git a/slasher/src/database/interface.rs b/slasher/src/database/interface.rs index af72006caa..dcbb82fe93 100644 --- a/slasher/src/database/interface.rs +++ b/slasher/src/database/interface.rs @@ -83,7 +83,7 @@ impl Environment { } } - pub fn create_databases(&self) -> Result { + pub fn create_databases(&self) -> Result, Error> { match self { #[cfg(feature = "mdbx")] Self::Mdbx(env) => env.create_databases(), @@ -95,7 +95,7 @@ impl Environment { } } - pub fn begin_rw_txn(&self) -> Result { + pub fn begin_rw_txn(&self) -> Result, Error> { match self { #[cfg(feature = "mdbx")] Self::Mdbx(env) => env.begin_rw_txn().map(RwTransaction::Mdbx), @@ -194,7 +194,7 @@ impl<'env> RwTransaction<'env> { impl Cursor<'_> { /// Return the first key in the current database while advancing the cursor's position. - pub fn first_key(&mut self) -> Result, Error> { + pub fn first_key(&mut self) -> Result>, Error> { match self { #[cfg(feature = "mdbx")] Cursor::Mdbx(cursor) => cursor.first_key(), @@ -207,7 +207,7 @@ impl Cursor<'_> { } /// Return the last key in the current database while advancing the cursor's position. 
- pub fn last_key(&mut self) -> Result, Error> { + pub fn last_key(&mut self) -> Result>, Error> { match self { #[cfg(feature = "mdbx")] Cursor::Mdbx(cursor) => cursor.last_key(), @@ -219,7 +219,7 @@ impl Cursor<'_> { } } - pub fn next_key(&mut self) -> Result, Error> { + pub fn next_key(&mut self) -> Result>, Error> { match self { #[cfg(feature = "mdbx")] Cursor::Mdbx(cursor) => cursor.next_key(), diff --git a/slasher/src/database/lmdb_impl.rs b/slasher/src/database/lmdb_impl.rs index 74342968cf..a2ef298830 100644 --- a/slasher/src/database/lmdb_impl.rs +++ b/slasher/src/database/lmdb_impl.rs @@ -41,7 +41,7 @@ impl Environment { Ok(Environment { env }) } - pub fn create_databases(&self) -> Result { + pub fn create_databases(&self) -> Result, Error> { let indexed_attestation_db = self .env .create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; @@ -80,7 +80,7 @@ impl Environment { }) } - pub fn begin_rw_txn(&self) -> Result { + pub fn begin_rw_txn(&self) -> Result, Error> { let txn = self.env.begin_rw_txn()?; Ok(RwTransaction { txn }) } @@ -137,7 +137,7 @@ impl<'env> RwTransaction<'env> { } impl<'env> Cursor<'env> { - pub fn first_key(&mut self) -> Result, Error> { + pub fn first_key(&mut self) -> Result>, Error> { let opt_key = self .cursor .get(None, None, MDB_FIRST) diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index d93f3a5578..f6d6837f71 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -7,6 +7,7 @@ edition = { workspace = true } [features] # `ef_tests` feature must be enabled to actually run the tests ef_tests = [] +disable_rayon = [] fake_crypto = ["bls/fake_crypto"] portable = ["beacon_chain/portable"] @@ -16,6 +17,8 @@ beacon_chain = { workspace = true } bls = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } +context_deserialize = { workspace = true } +context_deserialize_derive = { workspace = true } derivative = { workspace = true } 
eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index c3a56ec11a..48afcae4b2 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,44 +1,33 @@ -TESTS_TAG := v1.5.0-beta.4 -TESTS = general minimal mainnet -TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) - +# To download/extract nightly tests, run: +# CONSENSUS_SPECS_TEST_VERSION=nightly make +CONSENSUS_SPECS_TEST_VERSION ?= v1.6.0-alpha.1 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) -BASE_URL := https://github.com/ethereum/$(REPO_NAME)/releases/download/$(TESTS_TAG) BLS_TEST_REPO_NAME := bls12-381-tests -BLS_TEST_TAG := v0.1.1 +BLS_TEST_VERSION := v0.1.1 BLS_TEST = bls_tests_yaml -BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST)) BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME) -BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG) +BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_VERSION) -CURL := $(if $(LIGHTHOUSE_GITHUB_TOKEN),curl -L --header "Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",curl -L) +.PHONY: all clean -all: - make $(OUTPUT_DIR) - make $(BLS_OUTPUT_DIR) +all: clean $(OUTPUT_DIR) $(BLS_OUTPUT_DIR) -$(OUTPUT_DIR): $(TARBALLS) - mkdir $(OUTPUT_DIR) - for test_tarball in $^; do \ - tar -xzf $$test_tarball -C $(OUTPUT_DIR);\ +clean: + rm -rf *.tar.gz $(OUTPUT_DIR) $(BLS_OUTPUT_DIR) + +$(OUTPUT_DIR): + mkdir -p $(OUTPUT_DIR) + ./download_test_vectors.sh $(CONSENSUS_SPECS_TEST_VERSION) + for test_tarball in *.tar.gz; do \ + tar -xzf $$test_tarball -C $(OUTPUT_DIR); \ + rm -f $$test_tarball; \ done $(BLS_OUTPUT_DIR): - mkdir $(BLS_OUTPUT_DIR) - $(CURL) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -o $(BLS_TARBALL) - tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR) - -%-$(TESTS_TAG).tar.gz: - $(CURL) $(BASE_URL)/$*.tar.gz -o $@ - -clean-test-files: - rm -rf 
$(OUTPUT_DIR) $(BLS_OUTPUT_DIR) - -clean-archives: - rm -f $(TARBALLS) $(BLS_TARBALL) - -clean: clean-test-files clean-archives - -.PHONY: clean clean-archives clean-test-files + mkdir -p $(BLS_OUTPUT_DIR) + curl --progress-bar --location --remote-name --show-error --retry 3 --retry-all-errors --fail \ + $(BLS_BASE_URL)/$(BLS_TEST).tar.gz + tar -xzf *.tar.gz -C $(BLS_OUTPUT_DIR) + rm -f *.tar.gz diff --git a/testing/ef_tests/README.md b/testing/ef_tests/README.md index 5ffd453d99..b04cd25dc7 100644 --- a/testing/ef_tests/README.md +++ b/testing/ef_tests/README.md @@ -28,6 +28,16 @@ $ cargo test --features ef_tests The tests won't run without the `ef_tests` feature enabled (this is to ensure that a top-level `cargo test --all` won't fail on missing files). +The following is sometimes necessary to avoid stack overflow issues when running on MacOS: +``` +$ export RUST_MIN_STACK=8388608 +``` + +When debugging failing tests, it's often useful to disable parallization and output suppression: +``` +$ cargo test --features ef_tests,disable_rayon -- --nocapture +``` + ## Saving Space When you download the tests, the downloaded archives will be kept in addition to the extracted diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 3aeff8ce06..d7568d854f 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -45,13 +45,13 @@ excluded_paths = [ "bls12-381-tests/deserialization_G1", "bls12-381-tests/deserialization_G2", "bls12-381-tests/hash_to_G2", - "tests/.*/eip6110", - "tests/.*/whisk", - # TODO(das): Fulu tests are ignored for now - "tests/.*/fulu", - "tests/.*/fulu/ssz_static/MatrixEntry", - "tests/.*/eip7441", "tests/.*/eip7732", + "tests/.*/eip7805", + # Ignore MatrixEntry SSZ tests for now. + "tests/.*/fulu/ssz_static/MatrixEntry/.*", + # Ignore full epoch tests for now (just test the sub-transitions). 
+ "tests/.*/.*/epoch_processing/.*/pre_epoch.ssz_snappy", + "tests/.*/.*/epoch_processing/.*/post_epoch.ssz_snappy", ] diff --git a/testing/ef_tests/download_test_vectors.sh b/testing/ef_tests/download_test_vectors.sh new file mode 100755 index 0000000000..7297f7eeb8 --- /dev/null +++ b/testing/ef_tests/download_test_vectors.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +set -Eeuo pipefail + +TESTS=("general" "minimal" "mainnet") + +version=${1} +if [[ "$version" == "nightly" ]]; then + if [[ -z "${GITHUB_TOKEN:-}" ]]; then + echo "Error GITHUB_TOKEN is not set" + exit 1 + fi + + for cmd in unzip jq; do + if ! command -v "${cmd}" >/dev/null 2>&1; then + echo "Error ${cmd} is not installed" + exit 1 + fi + done + + repo="ethereum/consensus-specs" + api="https://api.github.com" + auth_header="Authorization: token ${GITHUB_TOKEN}" + + run_id=$(curl -s -H "${auth_header}" \ + "${api}/repos/${repo}/actions/workflows/generate_vectors.yml/runs?branch=dev&status=success&per_page=1" | + jq -r '.workflow_runs[0].id') + + if [[ "${run_id}" == "null" || -z "${run_id}" ]]; then + echo "No successful nightly workflow run found" + exit 1 + fi + + echo "Downloading nightly test vectors for run: ${run_id}" + curl -s -H "${auth_header}" "${api}/repos/${repo}/actions/runs/${run_id}/artifacts" | + jq -c '.artifacts[] | {name, url: .archive_download_url}' | + while read -r artifact; do + name=$(echo "${artifact}" | jq -r .name) + url=$(echo "${artifact}" | jq -r .url) + + if [[ "$name" == "consensustestgen.log" ]]; then + continue + fi + + echo "Downloading artifact: ${name}" + curl --progress-bar --location --show-error --retry 3 --retry-all-errors --fail \ + -H "${auth_header}" -H "Accept: application/vnd.github+json" \ + --output "${name}.zip" "${url}" || { + echo "Failed to download ${name}" + exit 1 + } + + unzip -qo "${name}.zip" + rm -f "${name}.zip" + done +else + for test in "${TESTS[@]}"; do + if [[ ! 
-e "${test}.tar.gz" ]]; then + echo "Downloading: ${version}/${test}.tar.gz" + curl --progress-bar --location --remote-name --show-error --retry 3 --retry-all-errors --fail \ + "https://github.com/ethereum/consensus-spec-tests/releases/download/${version}/${test}.tar.gz" \ + || { + echo "Curl failed. Aborting" + rm -f "${test}.tar.gz" + exit 1 + } + fi + done +fi diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 31662e831a..b6f7cb21a1 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -22,6 +22,7 @@ mod genesis_validity; mod get_custody_groups; mod kzg_blob_to_kzg_commitment; mod kzg_compute_blob_kzg_proof; +mod kzg_compute_cells; mod kzg_compute_cells_and_kzg_proofs; mod kzg_compute_kzg_proof; mod kzg_recover_cells_and_kzg_proofs; @@ -49,7 +50,7 @@ pub use bls_eth_fast_aggregate_verify::*; pub use bls_fast_aggregate_verify::*; pub use bls_sign_msg::*; pub use bls_verify_msg::*; -pub use common::SszStaticType; +pub use common::{DataColumnsByRootIdentifierWrapper, SszStaticType}; pub use compute_columns_for_custody_groups::*; pub use epoch_processing::*; pub use fork::ForkTest; @@ -58,6 +59,7 @@ pub use genesis_validity::*; pub use get_custody_groups::*; pub use kzg_blob_to_kzg_commitment::*; pub use kzg_compute_blob_kzg_proof::*; +pub use kzg_compute_cells::*; pub use kzg_compute_cells_and_kzg_proofs::*; pub use kzg_compute_kzg_proof::*; pub use kzg_recover_cells_and_kzg_proofs::*; @@ -91,29 +93,29 @@ pub use transition::TransitionTest; /// to return `true` for the feature in order for the feature test vector to be tested. #[derive(Debug, PartialEq, Clone, Copy)] pub enum FeatureName { - // TODO(fulu): to be removed once we start using Fulu types for test vectors. - // Existing SSZ types for PeerDAS (Fulu) are the same as Electra, so the test vectors get - // loaded as Electra types (default serde behaviour for untagged enums). 
- Fulu, + // Placeholder for future feature-gated forks + // Add new feature-gated forks here before they are incorporated into a main fork + #[doc(hidden)] + __Placeholder, } impl FeatureName { pub fn list_all() -> Vec { - vec![FeatureName::Fulu] + vec![] } /// `ForkName` to use when running the feature tests. pub fn fork_name(&self) -> ForkName { match self { - FeatureName::Fulu => ForkName::Electra, + FeatureName::__Placeholder => unreachable!("Placeholder variant should never be used"), } } } impl Display for FeatureName { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, _f: &mut Formatter<'_>) -> std::fmt::Result { match self { - FeatureName::Fulu => f.write_str("fulu"), + FeatureName::__Placeholder => unreachable!("Placeholder variant should never be used"), } } } diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index 62f834820f..f63380cc33 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -1,8 +1,11 @@ -use serde::Deserialize; +use context_deserialize::ContextDeserialize; +use serde::{Deserialize, Deserializer}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::fmt::Debug; -use types::ForkName; +use std::marker::PhantomData; +use tree_hash::TreeHash; +use types::{DataColumnsByRootIdentifier, EthSpec, ForkName, Hash256}; /// Macro to wrap U128 and U256 so they deserialize correctly. macro_rules! uint_wrapper { @@ -40,6 +43,15 @@ macro_rules! uint_wrapper { self.x.tree_hash_root() } } + + impl<'de, T> ContextDeserialize<'de, T> for $wrapper_name { + fn context_deserialize(deserializer: D, _context: T) -> Result + where + D: Deserializer<'de>, + { + <$wrapper_name>::deserialize(deserializer) + } + } }; } @@ -47,26 +59,63 @@ uint_wrapper!(DecimalU128, alloy_primitives::U128); uint_wrapper!(DecimalU256, alloy_primitives::U256); /// Trait for types that can be used in SSZ static tests. 
-pub trait SszStaticType: - serde::de::DeserializeOwned + Encode + Clone + PartialEq + Debug + Sync -{ +pub trait SszStaticType: Encode + Clone + PartialEq + Debug + Sync {} + +impl SszStaticType for T where T: Encode + Clone + PartialEq + Debug + Sync {} + +/// We need the `EthSpec` to implement `LoadCase` for this type, in order to work out the +/// ChainSpec. +/// +/// No other type currently requires this kind of context. +#[derive(Debug, Encode, Clone, PartialEq)] +#[ssz(struct_behaviour = "transparent")] +pub struct DataColumnsByRootIdentifierWrapper { + pub value: DataColumnsByRootIdentifier, + // SSZ derive is a bit buggy and requires skip_deserializing for transparent to work. + #[ssz(skip_serializing, skip_deserializing)] + pub _phantom: PhantomData, } -impl SszStaticType for T where - T: serde::de::DeserializeOwned + Encode + Clone + PartialEq + Debug + Sync +impl<'de, E: EthSpec> ContextDeserialize<'de, (ForkName, usize)> + for DataColumnsByRootIdentifierWrapper { + fn context_deserialize(deserializer: D, context: (ForkName, usize)) -> Result + where + D: Deserializer<'de>, + { + let value = DataColumnsByRootIdentifier::context_deserialize(deserializer, context)?; + Ok(DataColumnsByRootIdentifierWrapper { + value, + _phantom: PhantomData, + }) + } } -/// Return the fork immediately prior to a fork. -pub fn previous_fork(fork_name: ForkName) -> ForkName { - match fork_name { - ForkName::Base => ForkName::Base, - ForkName::Altair => ForkName::Base, - ForkName::Bellatrix => ForkName::Altair, - ForkName::Capella => ForkName::Bellatrix, - ForkName::Deneb => ForkName::Capella, - ForkName::Electra => ForkName::Deneb, - ForkName::Fulu => ForkName::Electra, +// We can delete this if we ever get `tree_hash(struct_behaviour = "transparent")`. 
+impl TreeHash for DataColumnsByRootIdentifierWrapper { + fn tree_hash_type() -> tree_hash::TreeHashType { + DataColumnsByRootIdentifier::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { + self.value.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + DataColumnsByRootIdentifier::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> Hash256 { + self.value.tree_hash_root() + } +} + +impl From for DataColumnsByRootIdentifierWrapper { + fn from(value: DataColumnsByRootIdentifier) -> Self { + Self { + value, + _phantom: PhantomData, + } } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index e05225c171..0dc5e7ab11 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -11,7 +11,7 @@ use state_processing::per_epoch_processing::effective_balance_updates::{ process_effective_balance_updates, process_effective_balance_updates_slow, }; use state_processing::per_epoch_processing::single_pass::{ - process_epoch_single_pass, SinglePassConfig, + process_epoch_single_pass, process_proposer_lookahead, SinglePassConfig, }; use state_processing::per_epoch_processing::{ altair, base, @@ -77,6 +77,8 @@ pub struct SyncCommitteeUpdates; pub struct InactivityUpdates; #[derive(Debug)] pub struct ParticipationFlagUpdates; +#[derive(Debug)] +pub struct ProposerLookahead; type_name!( JustificationAndFinalization, @@ -97,6 +99,7 @@ type_name!(ParticipationRecordUpdates, "participation_record_updates"); type_name!(SyncCommitteeUpdates, "sync_committee_updates"); type_name!(InactivityUpdates, "inactivity_updates"); type_name!(ParticipationFlagUpdates, "participation_flag_updates"); +type_name!(ProposerLookahead, "proposer_lookahead"); impl EpochTransition for JustificationAndFinalization { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { @@ -280,6 +283,16 
@@ impl EpochTransition for ParticipationFlagUpdates { } } +impl EpochTransition for ProposerLookahead { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + if state.fork_name_unchecked().fulu_enabled() { + process_proposer_lookahead(state, spec) + } else { + Ok(()) + } + } +} + impl> LoadCase for EpochProcessing { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); @@ -338,6 +351,11 @@ impl> Case for EpochProcessing { { return false; } + + if !fork_name.fulu_enabled() && T::name() == "proposer_lookahead" { + return false; + } + true } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index 85301e22f6..cae4fcf405 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -1,6 +1,5 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use state_processing::upgrade::{ @@ -33,7 +32,10 @@ impl LoadCase for ForkTest { assert_eq!(metadata.fork_name(), fork_name); // Decode pre-state with previous fork. - let pre_spec = &previous_fork(fork_name).make_genesis_spec(E::default_spec()); + let pre_spec = &fork_name + .previous_fork() + .unwrap_or(ForkName::Base) + .make_genesis_spec(E::default_spec()); let pre = ssz_decode_state(&path.join("pre.ssz_snappy"), pre_spec)?; // Decode post-state with target fork. 
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index b507383190..af3b0bce2d 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -520,7 +520,7 @@ impl Tester { let result: Result, _> = self .block_on_dangerous(self.harness.chain.process_block( block_root, - RpcBlock::new_without_blobs(Some(block_root), block.clone(), 0), + RpcBlock::new_without_blobs(Some(block_root), block.clone()), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/testing/ef_tests/src/cases/kzg_compute_cells.rs b/testing/ef_tests/src/cases/kzg_compute_cells.rs new file mode 100644 index 0000000000..bd7f3649d6 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_compute_cells.rs @@ -0,0 +1,54 @@ +use super::*; +use crate::case_result::compare_result; +use kzg::Cell; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGComputeCellsInput { + pub blob: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGComputeCells { + pub input: KZGComputeCellsInput, + pub output: Option>, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for KZGComputeCells { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl Case for KZGComputeCells { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let cells = parse_blob::(&self.input.blob) + .and_then(|blob| { + let blob = blob.as_ref().try_into().map_err(|e| { + Error::InternalError(format!("Failed to convert blob to kzg blob: {e:?}")) + })?; + let kzg = get_kzg(); + kzg.compute_cells(blob).map_err(|e| { + Error::InternalError(format!("Failed to compute cells and kzg 
proofs: {e:?}")) + }) + }) + .map(|cells| cells.to_vec()); + + let expected = self.output.as_ref().map(|cells| { + parse_cells_and_proofs(cells, &[]) + .map(|(cells, _)| cells) + .expect("Valid cells") + }); + + compare_result::, _>(&cells, &expected) + } +} diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 7178edb151..80aa9de6f9 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -24,8 +24,8 @@ use state_processing::{ use std::fmt::Debug; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockBodyBellatrix, - BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, - BlindedPayload, ConsolidationRequest, Deposit, DepositRequest, ExecutionPayload, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, + BeaconState, BlindedPayload, ConsolidationRequest, Deposit, DepositRequest, ExecutionPayload, ForkVersionDecode, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SyncAggregate, WithdrawalRequest, }; @@ -357,8 +357,8 @@ impl Operation for BeaconBlockBody> { BeaconBlockBody::Electra(inner.clone_as_blinded()) } ForkName::Fulu => { - let inner = >>::from_ssz_bytes(bytes)?; - BeaconBlockBody::Electra(inner.clone_as_blinded()) + let inner = >>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Fulu(inner.clone_as_blinded()) } _ => panic!(), }) diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 3dc2f17968..96627472ba 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -3,7 +3,9 @@ use super::*; use crate::cases::common::{DecimalU128, DecimalU256, SszStaticType}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; -use crate::decode::{log_file_access, snappy_decode_file, yaml_decode_file}; +use 
crate::decode::{context_yaml_decode_file, log_file_access, snappy_decode_file}; +use context_deserialize::ContextDeserialize; +use context_deserialize_derive::context_deserialize; use serde::{de::Error as SerdeError, Deserialize, Deserializer}; use ssz_derive::{Decode, Encode}; use tree_hash::TreeHash; @@ -12,6 +14,7 @@ use types::typenum::*; use types::{BitList, BitVector, FixedVector, ForkName, VariableList, Vector}; #[derive(Debug, Clone, Deserialize)] +#[context_deserialize(ForkName)] struct Metadata { root: String, #[serde(rename(deserialize = "signing_root"))] @@ -118,7 +121,7 @@ macro_rules! type_dispatch { } impl Case for SszGeneric { - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { let parts = self.case_name.split('_').collect::>(); match self.handler_name.as_str() { @@ -134,7 +137,7 @@ impl Case for SszGeneric { type_dispatch!( ssz_generic_test, - (&self.path), + (&self.path, fork_name), Vector, <>, [elem_ty => primitive_type] @@ -142,7 +145,7 @@ impl Case for SszGeneric { )?; type_dispatch!( ssz_generic_test, - (&self.path), + (&self.path, fork_name), FixedVector, <>, [elem_ty => primitive_type] @@ -159,7 +162,7 @@ impl Case for SszGeneric { type_dispatch!( ssz_generic_test, - (&self.path), + (&self.path, fork_name), BitList, <>, [limit => typenum] @@ -170,21 +173,21 @@ impl Case for SszGeneric { type_dispatch!( ssz_generic_test, - (&self.path), + (&self.path, fork_name), BitVector, <>, [length => typenum] )?; } "boolean" => { - ssz_generic_test::(&self.path)?; + ssz_generic_test::(&self.path, fork_name)?; } "uints" => { let type_name = "uint".to_owned() + parts[1]; type_dispatch!( ssz_generic_test, - (&self.path), + (&self.path, fork_name), _, <>, [type_name.as_str() => primitive_type] @@ -195,7 +198,7 @@ impl Case for SszGeneric { type_dispatch!( ssz_generic_test, - (&self.path), + (&self.path, fork_name), _, <>, [type_name => 
test_container] @@ -207,10 +210,15 @@ impl Case for SszGeneric { } } -fn ssz_generic_test(path: &Path) -> Result<(), Error> { +fn ssz_generic_test< + T: SszStaticType + for<'de> ContextDeserialize<'de, ForkName> + TreeHash + ssz::Decode, +>( + path: &Path, + fork_name: ForkName, +) -> Result<(), Error> { let meta_path = path.join("meta.yaml"); let meta: Option = if meta_path.is_file() { - Some(yaml_decode_file(&meta_path)?) + Some(context_yaml_decode_file(&meta_path, fork_name)?) } else { None }; @@ -220,7 +228,7 @@ fn ssz_generic_test(path: &Path) -> R let value_path = path.join("value.yaml"); let value: Option = if value_path.is_file() { - Some(yaml_decode_file(&value_path)?) + Some(context_yaml_decode_file(&value_path, fork_name)?) } else { None }; @@ -246,17 +254,20 @@ fn ssz_generic_test(path: &Path) -> R // Containers for SSZ generic tests #[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +#[context_deserialize(ForkName)] struct SingleFieldTestStruct { A: u8, } #[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +#[context_deserialize(ForkName)] struct SmallTestStruct { A: u16, B: u16, } #[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +#[context_deserialize(ForkName)] struct FixedTestStruct { A: u8, B: u64, @@ -264,6 +275,7 @@ struct FixedTestStruct { } #[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +#[context_deserialize(ForkName)] struct VarTestStruct { A: u16, B: VariableList, @@ -271,6 +283,7 @@ struct VarTestStruct { } #[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +#[context_deserialize(ForkName)] struct ComplexTestStruct { A: u16, B: VariableList, @@ -283,6 +296,7 @@ struct ComplexTestStruct { } #[derive(Debug, Clone, PartialEq, Decode, Encode, TreeHash, Deserialize)] +#[context_deserialize(ForkName)] struct BitsStruct { A: BitList, B: BitVector, diff --git 
a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index c80977a8ac..b02b9597bb 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -1,10 +1,12 @@ use super::*; use crate::case_result::compare_result; -use crate::decode::{snappy_decode_file, yaml_decode_file}; +use crate::cases::common::DataColumnsByRootIdentifierWrapper; +use crate::decode::{context_yaml_decode_file, snappy_decode_file, yaml_decode_file}; +use context_deserialize::ContextDeserialize; use serde::Deserialize; use ssz::Decode; use tree_hash::TreeHash; -use types::{BeaconBlock, BeaconState, Hash256, SignedBeaconBlock}; +use types::{BeaconBlock, BeaconState, DataColumnsByRootIdentifier, Hash256, SignedBeaconBlock}; #[derive(Debug, Clone, Deserialize)] struct SszStaticRoots { @@ -37,18 +39,28 @@ pub struct SszStaticWithSpec { value: T, } -fn load_from_dir(path: &Path) -> Result<(SszStaticRoots, Vec, T), Error> { +fn load_from_dir ContextDeserialize<'de, ForkName>>( + path: &Path, + fork_name: ForkName, +) -> Result<(SszStaticRoots, Vec, T), Error> { + load_from_dir_with_context(path, fork_name) +} + +fn load_from_dir_with_context ContextDeserialize<'de, C>, C>( + path: &Path, + context: C, +) -> Result<(SszStaticRoots, Vec, T), Error> { let roots = yaml_decode_file(&path.join("roots.yaml"))?; let serialized = snappy_decode_file(&path.join("serialized.ssz_snappy")) .expect("serialized.ssz_snappy exists"); - let value = yaml_decode_file(&path.join("value.yaml"))?; + let value = context_yaml_decode_file(&path.join("value.yaml"), context)?; Ok((roots, serialized, value)) } -impl LoadCase for SszStatic { - fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { - load_from_dir(path).map(|(roots, serialized, value)| Self { +impl ContextDeserialize<'de, ForkName>> LoadCase for SszStatic { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + load_from_dir(path, fork_name).map(|(roots, serialized, value)| 
Self { roots, serialized, value, @@ -56,19 +68,9 @@ impl LoadCase for SszStatic { } } -impl LoadCase for SszStaticTHC { - fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { - load_from_dir(path).map(|(roots, serialized, value)| Self { - roots, - serialized, - value, - }) - } -} - -impl LoadCase for SszStaticWithSpec { - fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { - load_from_dir(path).map(|(roots, serialized, value)| Self { +impl ContextDeserialize<'de, ForkName>> LoadCase for SszStaticTHC { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + load_from_dir(path, fork_name).map(|(roots, serialized, value)| Self { roots, serialized, value, @@ -124,6 +126,16 @@ impl Case for SszStaticTHC> { } } +impl LoadCase for SszStaticWithSpec> { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + load_from_dir(path, fork_name).map(|(roots, serialized, value)| Self { + roots, + serialized, + value, + }) + } +} + impl Case for SszStaticWithSpec> { fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { let spec = &testing_spec::(fork_name); @@ -135,6 +147,16 @@ impl Case for SszStaticWithSpec> { } } +impl LoadCase for SszStaticWithSpec> { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + load_from_dir(path, fork_name).map(|(roots, serialized, value)| Self { + roots, + serialized, + value, + }) + } +} + impl Case for SszStaticWithSpec> { fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { let spec = &testing_spec::(fork_name); @@ -145,3 +167,27 @@ impl Case for SszStaticWithSpec> { Ok(()) } } + +impl LoadCase for SszStaticWithSpec> { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let spec = &testing_spec::(fork_name); + let context = (fork_name, spec.number_of_columns as usize); + load_from_dir_with_context(path, context).map(|(roots, serialized, value)| Self { + roots, + serialized, + value, + }) + } +} + +impl Case for 
SszStaticWithSpec> { + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = &testing_spec::(fork_name); + check_serialization(&self.value, &self.serialized, |bytes| { + DataColumnsByRootIdentifier::from_ssz_bytes(bytes, spec.number_of_columns as usize) + .map(Into::into) + })?; + check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_slice())?; + Ok(()) + } +} diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index eb88ac6af1..2074ffce23 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -1,4 +1,5 @@ use super::*; +use context_deserialize::ContextDeserialize; use fs2::FileExt; use snap::raw::Decoder; use std::fs::{self}; @@ -35,6 +36,27 @@ pub fn yaml_decode(string: &str) -> Result(string: &'de str, context: C) -> Result +where + T: ContextDeserialize<'de, C>, +{ + let deserializer = serde_yaml::Deserializer::from_str(string); + T::context_deserialize(deserializer, context) + .map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) +} + +pub fn context_yaml_decode_file(path: &Path, context: C) -> Result +where + T: for<'de> ContextDeserialize<'de, C>, +{ + log_file_access(path); + fs::read_to_string(path) + .map_err(|e| { + Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) + }) + .and_then(|s| context_yaml_decode(&s, context)) +} + pub fn yaml_decode_file(path: &Path) -> Result { log_file_access(path); fs::read_to_string(path) diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index a375498239..fd2bea6e8e 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -1,6 +1,7 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; use crate::type_name::TypeName; use crate::{type_name, FeatureName}; +use context_deserialize::ContextDeserialize; use derivative::Derivative; use std::fs::{self, DirEntry}; use std::marker::PhantomData; @@ -21,7 +22,7 @@ 
pub trait Handler { // Add forks here to exclude them from EF spec testing. Helpful for adding future or // unspecified forks. fn disabled_forks(&self) -> Vec { - vec![ForkName::Fulu] + vec![] } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { @@ -50,6 +51,19 @@ pub trait Handler { } } + // Do NOT override this function. + // TODO: use default keyword when stable. + fn rayon_enabled() -> bool { + #[cfg(feature = "disable_rayon")] + { + false + } + #[cfg(not(feature = "disable_rayon"))] + { + Self::use_rayon() + } + } + fn use_rayon() -> bool { true } @@ -85,7 +99,7 @@ pub trait Handler { }) .collect(); - let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); + let results = Cases { test_cases }.test_results(fork_name, Self::rayon_enabled()); let name = format!( "{}/{}/{}", @@ -127,7 +141,7 @@ pub trait Handler { }) .collect(); - let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); + let results = Cases { test_cases }.test_results(fork_name, Self::rayon_enabled()); let name = format!( "{}/{}/{}", @@ -205,7 +219,7 @@ macro_rules! bls_handler { }) .collect(); - let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); + let results = Cases { test_cases }.test_results(fork_name, Self::rayon_enabled()); let name = format!( "{}/{}/{}", @@ -327,13 +341,37 @@ impl SszStaticHandler { pub struct SszStaticTHCHandler(PhantomData<(T, E)>); /// Handler for SSZ types that don't implement `ssz::Decode`. 
-#[derive(Derivative)] -#[derivative(Default(bound = ""))] -pub struct SszStaticWithSpecHandler(PhantomData<(T, E)>); +pub struct SszStaticWithSpecHandler { + supported_forks: Vec, + _phantom: PhantomData<(T, E)>, +} + +impl Default for SszStaticWithSpecHandler { + fn default() -> Self { + Self::for_forks(ForkName::list_all()) + } +} + +impl SszStaticWithSpecHandler { + pub fn for_forks(supported_forks: Vec) -> Self { + SszStaticWithSpecHandler { + supported_forks, + _phantom: PhantomData, + } + } + + pub fn fulu_and_later() -> Self { + Self::for_forks(ForkName::list_all()[6..].to_vec()) + } +} impl Handler for SszStaticHandler where - T: cases::SszStaticType + tree_hash::TreeHash + ssz::Decode + TypeName, + T: cases::SszStaticType + + for<'de> ContextDeserialize<'de, ForkName> + + tree_hash::TreeHash + + ssz::Decode + + TypeName, E: TypeName, { type Case = cases::SszStatic; @@ -353,25 +391,6 @@ where fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { self.supported_forks.contains(&fork_name) } - - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - // TODO(fulu): to be removed once Fulu types start differing from Electra. We currently run Fulu tests as a - // "feature" - this means we use Electra types for Fulu SSZ tests (except for PeerDAS types, e.g. `DataColumnSidecar`). - // - // This ensures we only run the tests **once** for `Fulu`, using the types matching the - // correct fork, e.g. `Fulu` uses SSZ types from `Electra` as of spec test version - // `v1.5.0-beta.0`, therefore the `Fulu` tests should get included when testing Deneb types. - // - // e.g. 
Fulu test vectors are executed in the 2nd line below, but excluded in the 1st - // line when testing the type `AttestationElectra`: - // - // ``` - // SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only().run(); - // ``` - feature_name == FeatureName::Fulu - && self.supported_forks.contains(&feature_name.fork_name()) - } } impl Handler for SszStaticTHCHandler, E> @@ -391,10 +410,6 @@ where fn handler_name(&self) -> String { BeaconState::::name().into() } - - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu - } } impl Handler for SszStaticWithSpecHandler @@ -417,8 +432,8 @@ where T::name().into() } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + self.supported_forks.contains(&fork_name) } } @@ -898,10 +913,6 @@ impl Handler for GetCustodyGroupsHandler { fn handler_name(&self) -> String { "get_custody_groups".into() } - - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu - } } #[derive(Derivative)] @@ -922,9 +933,25 @@ impl Handler for ComputeColumnsForCustodyGroupHandler fn handler_name(&self) -> String { "compute_columns_for_custody_group".into() } +} - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGComputeCellsHandler(PhantomData); + +impl Handler for KZGComputeCellsHandler { + type Case = cases::KZGComputeCells; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "compute_cells".into() } } @@ -946,10 +973,6 @@ impl Handler for KZGComputeCellsAndKZGProofHandler { fn handler_name(&self) -> String { "compute_cells_and_kzg_proofs".into() } - - fn 
is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu - } } #[derive(Derivative)] @@ -970,10 +993,6 @@ impl Handler for KZGVerifyCellKZGProofBatchHandler { fn handler_name(&self) -> String { "verify_cell_kzg_proof_batch".into() } - - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu - } } #[derive(Derivative)] @@ -994,10 +1013,6 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { fn handler_name(&self) -> String { "recover_cells_and_kzg_proofs".into() } - - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu - } } #[derive(Derivative)] @@ -1022,10 +1037,6 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { fork_name.deneb_enabled() } - - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu - } } #[derive(Derivative)] @@ -1073,7 +1084,8 @@ impl Handler for LightClientUpdateHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Enabled in Altair - fork_name.altair_enabled() + // No test in Fulu yet. 
+ fork_name.altair_enabled() && fork_name != ForkName::Fulu } } diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index e7367719d7..a2d905738e 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,11 +1,11 @@ pub use case_result::CaseResult; pub use cases::WithdrawalsPayload; pub use cases::{ - Case, EffectiveBalanceUpdates, Eth1DataReset, FeatureName, HistoricalRootsUpdate, - HistoricalSummariesUpdate, InactivityUpdates, JustificationAndFinalization, - ParticipationFlagUpdates, ParticipationRecordUpdates, PendingBalanceDeposits, - PendingConsolidations, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, - SlashingsReset, SyncCommitteeUpdates, + Case, DataColumnsByRootIdentifierWrapper, EffectiveBalanceUpdates, Eth1DataReset, FeatureName, + HistoricalRootsUpdate, HistoricalSummariesUpdate, InactivityUpdates, + JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, + PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, RandaoMixesReset, + RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, SyncCommitteeUpdates, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 387e77310d..b5b2c424d8 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -1,4 +1,5 @@ //! Mapping from types to canonical string identifiers used in testing. 
+use crate::DataColumnsByRootIdentifierWrapper; use types::historical_summary::HistoricalSummary; use types::*; @@ -59,6 +60,10 @@ type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(BlobIdentifier); type_name!(DataColumnsByRootIdentifier); +type_name_generic!( + DataColumnsByRootIdentifierWrapper, + "DataColumnsByRootIdentifier" +); type_name_generic!(BlobSidecar); type_name_generic!(DataColumnSidecar); type_name!(Checkpoint); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index d333cdbb11..b6264f2e08 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -238,7 +238,8 @@ macro_rules! ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { use ef_tests::{ - FeatureName, Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler, + DataColumnsByRootIdentifierWrapper, Handler, SszStaticHandler, SszStaticTHCHandler, + SszStaticWithSpecHandler, }; use types::historical_summary::HistoricalSummary; use types::{ @@ -660,20 +661,24 @@ mod ssz_static { #[test] fn data_column_sidecar() { - SszStaticHandler::, MinimalEthSpec>::default() - .run_for_feature(FeatureName::Fulu); - SszStaticHandler::, MainnetEthSpec>::default() - .run_for_feature(FeatureName::Fulu); + SszStaticHandler::, MinimalEthSpec>::fulu_and_later() + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_and_later() + .run(); } #[test] - #[ignore] - // TODO(das): enable once EF tests are updated to latest release. 
fn data_column_by_root_identifier() { - // SszStaticHandler::::default() - // .run_for_feature(FeatureName::Fulu); - // SszStaticHandler::::default() - // .run_for_feature(FeatureName::Fulu); + SszStaticWithSpecHandler::< + DataColumnsByRootIdentifierWrapper, + MinimalEthSpec, + >::fulu_and_later() + .run(); + SszStaticWithSpecHandler::< + DataColumnsByRootIdentifierWrapper, + MainnetEthSpec, + >::fulu_and_later() + .run(); } #[test] @@ -828,6 +833,12 @@ fn epoch_processing_participation_flag_updates() { EpochProcessingHandler::::default().run(); } +#[test] +fn epoch_processing_proposer_lookahead() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + #[test] fn fork_upgrade() { ForkHandler::::default().run(); @@ -941,6 +952,11 @@ fn kzg_verify_kzg_proof() { KZGVerifyKZGProofHandler::::default().run(); } +#[test] +fn kzg_compute_cells() { + KZGComputeCellsHandler::::default().run(); +} + #[test] fn kzg_compute_cells_and_proofs() { KZGComputeCellsAndKZGProofHandler::::default().run(); diff --git a/testing/eth1_test_rig/.gitignore b/testing/eth1_test_rig/.gitignore deleted file mode 100644 index 81b46ff033..0000000000 --- a/testing/eth1_test_rig/.gitignore +++ /dev/null @@ -1 +0,0 @@ -contract/ diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml deleted file mode 100644 index 9b0ac5ec9b..0000000000 --- a/testing/eth1_test_rig/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "eth1_test_rig" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -[dependencies] -deposit_contract = { workspace = true } -ethers-contract = "1.0.2" -ethers-core = { workspace = true } -ethers-providers = { workspace = true } -hex = { workspace = true } -serde_json = { workspace = true } -tokio = { workspace = true } -types = { workspace = true } -unused_port = { workspace = true } diff --git a/testing/eth1_test_rig/src/anvil.rs b/testing/eth1_test_rig/src/anvil.rs deleted file mode 
100644 index c6c37ae4a7..0000000000 --- a/testing/eth1_test_rig/src/anvil.rs +++ /dev/null @@ -1,100 +0,0 @@ -use ethers_core::utils::{Anvil, AnvilInstance}; -use ethers_providers::{Http, Middleware, Provider}; -use serde_json::json; -use unused_port::unused_tcp4_port; - -/// Provides a dedicated `anvil` instance. -/// -/// Requires that `anvil` is installed and available on `PATH`. -pub struct AnvilCliInstance { - pub port: u16, - pub anvil: AnvilInstance, - pub client: Provider, - chain_id: u64, -} - -impl AnvilCliInstance { - fn new_from_child(anvil_instance: Anvil, chain_id: u64, port: u16) -> Result { - let client = Provider::::try_from(&endpoint(port)) - .map_err(|e| format!("Failed to start HTTP transport connected to anvil: {:?}", e))?; - Ok(Self { - port, - anvil: anvil_instance.spawn(), - client, - chain_id, - }) - } - pub fn new(chain_id: u64) -> Result { - let port = unused_tcp4_port()?; - - let anvil = Anvil::new() - .port(port) - .mnemonic("vast thought differ pull jewel broom cook wrist tribe word before omit") - .arg("--balance") - .arg("1000000000") - .arg("--gas-limit") - .arg("1000000000") - .arg("--accounts") - .arg("10") - .arg("--chain-id") - .arg(format!("{}", chain_id)); - - Self::new_from_child(anvil, chain_id, port) - } - - pub fn fork(&self) -> Result { - let port = unused_tcp4_port()?; - - let anvil = Anvil::new() - .port(port) - .arg("--chain-id") - .arg(format!("{}", self.chain_id())) - .fork(self.endpoint()); - - Self::new_from_child(anvil, self.chain_id, port) - } - - /// Returns the endpoint that this instance is listening on. - pub fn endpoint(&self) -> String { - endpoint(self.port) - } - - /// Returns the chain id of the anvil instance - pub fn chain_id(&self) -> u64 { - self.chain_id - } - - /// Increase the timestamp on future blocks by `increase_by` seconds. 
- pub async fn increase_time(&self, increase_by: u64) -> Result<(), String> { - self.client - .request("evm_increaseTime", vec![json!(increase_by)]) - .await - .map(|_json_value: u64| ()) - .map_err(|e| format!("Failed to increase time on EVM (is this anvil?): {:?}", e)) - } - - /// Returns the current block number, as u64 - pub async fn block_number(&self) -> Result { - self.client - .get_block_number() - .await - .map(|v| v.as_u64()) - .map_err(|e| format!("Failed to get block number: {:?}", e)) - } - - /// Mines a single block. - pub async fn evm_mine(&self) -> Result<(), String> { - self.client - .request("evm_mine", ()) - .await - .map(|_: String| ()) - .map_err(|_| { - "utils should mine new block with evm_mine (only works with anvil/ganache!)" - .to_string() - }) - } -} - -fn endpoint(port: u16) -> String { - format!("http://127.0.0.1:{}", port) -} diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs deleted file mode 100644 index 3cba908261..0000000000 --- a/testing/eth1_test_rig/src/lib.rs +++ /dev/null @@ -1,301 +0,0 @@ -//! Provides utilities for deploying and manipulating the eth2 deposit contract on the eth1 chain. -//! -//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil) to simulate -//! the deposit contract for testing beacon node eth1 integration. -//! -//! Not tested to work with actual clients (e.g., geth). It should work fine, however there may be -//! some initial issues. 
-mod anvil; - -use anvil::AnvilCliInstance; -use deposit_contract::{ - encode_eth1_tx_data, testnet, ABI, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, -}; -use ethers_contract::Contract; -use ethers_core::{ - abi::Abi, - types::{transaction::eip2718::TypedTransaction, Address, Bytes, TransactionRequest, U256}, -}; -pub use ethers_providers::{Http, Middleware, Provider}; -use std::time::Duration; -use tokio::time::sleep; -use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256, Keypair, Signature}; -use types::{DepositData, FixedBytesExtended}; - -pub const DEPLOYER_ACCOUNTS_INDEX: usize = 0; -pub const DEPOSIT_ACCOUNTS_INDEX: usize = 0; - -/// Provides a dedicated anvil instance with the deposit contract already deployed. -pub struct AnvilEth1Instance { - pub anvil: AnvilCliInstance, - pub deposit_contract: DepositContract, -} - -impl AnvilEth1Instance { - pub async fn new(chain_id: u64) -> Result { - let anvil = AnvilCliInstance::new(chain_id)?; - DepositContract::deploy(anvil.client.clone(), 0, None) - .await - .map(|deposit_contract| Self { - anvil, - deposit_contract, - }) - } - - pub fn endpoint(&self) -> String { - self.anvil.endpoint() - } - - pub fn json_rpc_client(&self) -> Provider { - self.anvil.client.clone() - } -} - -/// Deploys and provides functions for the eth2 deposit contract, deployed on the eth1 chain. 
-#[derive(Clone, Debug)] -pub struct DepositContract { - client: Provider, - contract: Contract>, -} - -impl DepositContract { - pub async fn deploy( - client: Provider, - confirmations: usize, - password: Option, - ) -> Result { - Self::deploy_bytecode(client, confirmations, BYTECODE, ABI, password).await - } - - pub async fn deploy_testnet( - client: Provider, - confirmations: usize, - password: Option, - ) -> Result { - Self::deploy_bytecode( - client, - confirmations, - testnet::BYTECODE, - testnet::ABI, - password, - ) - .await - } - - async fn deploy_bytecode( - client: Provider, - confirmations: usize, - bytecode: &[u8], - abi: &[u8], - password: Option, - ) -> Result { - let abi = Abi::load(abi).map_err(|e| format!("Invalid deposit contract abi: {:?}", e))?; - let address = - deploy_deposit_contract(client.clone(), confirmations, bytecode.to_vec(), password) - .await - .map_err(|e| { - format!( - "Failed to deploy contract: {}. Is the RPC server running?.", - e - ) - })?; - - let contract = Contract::new(address, abi, client.clone()); - Ok(Self { client, contract }) - } - - /// The deposit contract's address in `0x00ab...` format. - pub fn address(&self) -> String { - format!("0x{:x}", self.contract.address()) - } - - /// A helper to return a fully-formed `DepositData`. Does not submit the deposit data to the - /// smart contact. - pub fn deposit_helper( - &self, - keypair: Keypair, - withdrawal_credentials: Hash256, - amount: u64, - ) -> DepositData { - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials, - amount, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - - deposit - } - - /// Creates a random, valid deposit and submits it to the deposit contract. - /// - /// The keypairs are created randomly and destroyed. 
- pub async fn deposit_random(&self) -> Result<(), String> { - let keypair = Keypair::random(); - - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials: Hash256::zero(), - amount: 32_000_000_000, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - - self.deposit(deposit).await - } - - /// Perfoms a blocking deposit. - pub async fn deposit(&self, deposit_data: DepositData) -> Result<(), String> { - self.deposit_async(deposit_data) - .await - .map_err(|e| format!("Deposit failed: {:?}", e)) - } - - pub async fn deposit_deterministic_async( - &self, - keypair_index: usize, - amount: u64, - ) -> Result<(), String> { - let keypair = generate_deterministic_keypair(keypair_index); - - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials: Hash256::zero(), - amount, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - - self.deposit_async(deposit).await - } - - /// Performs a non-blocking deposit. - pub async fn deposit_async(&self, deposit_data: DepositData) -> Result<(), String> { - let from = self - .client - .get_accounts() - .await - .map_err(|e| format!("Failed to get accounts: {:?}", e)) - .and_then(|accounts| { - accounts - .get(DEPOSIT_ACCOUNTS_INDEX) - .cloned() - .ok_or_else(|| "Insufficient accounts for deposit".to_string()) - })?; - // Note: the reason we use this `TransactionRequest` instead of just using the - // function in `self.contract` is so that the `eth1_tx_data` function gets used - // during testing. - // - // It's important that `eth1_tx_data` stays correct and does not suffer from - // code-rot. 
- let tx_request = TransactionRequest::new() - .from(from) - .to(self.contract.address()) - .gas(DEPOSIT_GAS) - .value(from_gwei(deposit_data.amount)) - .data(Bytes::from(encode_eth1_tx_data(&deposit_data).map_err( - |e| format!("Failed to encode deposit data: {:?}", e), - )?)); - - let pending_tx = self - .client - .send_transaction(tx_request, None) - .await - .map_err(|e| format!("Failed to call deposit fn: {:?}", e))?; - - pending_tx - .interval(Duration::from_millis(10)) - .confirmations(0) - .await - .map_err(|e| format!("Transaction failed to resolve: {:?}", e))? - .ok_or_else(|| "Transaction dropped from mempool".to_string())?; - Ok(()) - } - - /// Peforms many deposits, each preceded by a delay. - pub async fn deposit_multiple(&self, deposits: Vec) -> Result<(), String> { - for deposit in deposits.into_iter() { - sleep(deposit.delay).await; - self.deposit_async(deposit.deposit).await?; - } - Ok(()) - } -} - -/// Describes a deposit and a delay that should should precede it's submission to the deposit -/// contract. -#[derive(Clone)] -pub struct DelayThenDeposit { - /// Wait this duration ... - pub delay: Duration, - /// ... then submit this deposit. - pub deposit: DepositData, -} - -fn from_gwei(gwei: u64) -> U256 { - U256::from(gwei) * U256::exp10(9) -} - -/// Deploys the deposit contract to the given web3 instance using the account with index -/// `DEPLOYER_ACCOUNTS_INDEX`. 
-async fn deploy_deposit_contract( - client: Provider, - confirmations: usize, - bytecode: Vec, - password_opt: Option, -) -> Result { - let from_address = client - .get_accounts() - .await - .map_err(|e| format!("Failed to get accounts: {:?}", e)) - .and_then(|accounts| { - accounts - .get(DEPLOYER_ACCOUNTS_INDEX) - .cloned() - .ok_or_else(|| "Insufficient accounts for deployer".to_string()) - })?; - - let deploy_address = if let Some(password) = password_opt { - let result = client - .request( - "personal_unlockAccount", - vec![from_address.to_string(), password], - ) - .await; - - match result { - Ok(true) => from_address, - Ok(false) => return Err("Eth1 node refused to unlock account".to_string()), - Err(e) => return Err(format!("Eth1 unlock request failed: {:?}", e)), - } - } else { - from_address - }; - - let mut bytecode = String::from_utf8(bytecode).unwrap(); - bytecode.retain(|c| c.is_ascii_hexdigit()); - let bytecode = hex::decode(&bytecode[1..]).unwrap(); - - let deploy_tx: TypedTransaction = TransactionRequest::new() - .from(deploy_address) - .data(Bytes::from(bytecode)) - .gas(CONTRACT_DEPLOY_GAS) - .into(); - - let pending_tx = client - .send_transaction(deploy_tx, None) - .await - .map_err(|e| format!("Failed to send tx: {:?}", e))?; - - let tx = pending_tx - .interval(Duration::from_millis(500)) - .confirmations(confirmations) - .await - .map_err(|e| format!("Failed to fetch tx receipt: {:?}", e))?; - tx.and_then(|tx| tx.contract_address) - .ok_or_else(|| "Deposit contract not deployed successfully".to_string()) -} diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 8c39fda4e3..91d6c7fd57 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -14,6 +14,10 @@ pub fn build_result(repo_dir: &Path) -> Output { Command::new("make") .arg("geth") .current_dir(repo_dir) + // Geth now uses the commit hash from a GitHub runner 
environment variable if it detects a CI environment. + // We need to override this to successfully build Geth in Lighthouse workflows. + // See: https://github.com/ethereum/go-ethereum/blob/668c3a7278af399c0e776e92f1c721b5158388f2/internal/build/env.go#L95-L121 + .env("CI", "false") .output() .expect("failed to make geth") } diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index cf0d03c24f..cd23138a1c 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -7,9 +7,7 @@ edition = { workspace = true } [dependencies] clap = { workspace = true } -env_logger = { workspace = true } environment = { workspace = true } -eth2_network_config = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } kzg = { workspace = true } diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 707baf04a7..1fa59df4fe 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -20,7 +20,7 @@ pub fn cli_app() -> Command { .short('n') .long("nodes") .action(ArgAction::Set) - .default_value("3") + .default_value("2") .help("Number of beacon nodes"), ) .arg( @@ -28,7 +28,7 @@ pub fn cli_app() -> Command { .short('p') .long("proposer-nodes") .action(ArgAction::Set) - .default_value("3") + .default_value("1") .help("Number of proposer-only beacon nodes"), ) .arg( diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index 1cc4a1779b..7bd6e546f7 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -18,16 +18,12 @@ mod local_network; mod retry; use cli::cli_app; -use env_logger::{Builder, Env}; use local_network::LocalNetwork; use types::MinimalEthSpec; pub type E = MinimalEthSpec; fn main() { - // Debugging output for libp2p and external crates. 
- Builder::from_env(Env::default()).init(); - let matches = cli_app().get_matches(); match matches.subcommand_name() { Some("basic-sim") => match basic_sim::run_basic_sim(&matches) { diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml index 3bcb0d7034..5fe2af4cb0 100644 --- a/validator_client/beacon_node_fallback/Cargo.toml +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -13,6 +13,7 @@ clap = { workspace = true } eth2 = { workspace = true } futures = { workspace = true } itertools = { workspace = true } +sensitive_url = { workspace = true } serde = { workspace = true } slot_clock = { workspace = true } strum = { workspace = true } diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index e11cc97e79..b3158cd380 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -8,8 +8,9 @@ use beacon_node_health::{ IsOptimistic, SyncDistanceTier, }; use clap::ValueEnum; -use eth2::BeaconNodeHttpClient; +use eth2::{BeaconNodeHttpClient, Timeouts}; use futures::future; +use sensitive_url::SensitiveUrl; use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer}; use slot_clock::SlotClock; use std::cmp::Ordering; @@ -455,6 +456,39 @@ impl BeaconNodeFallback { (candidate_info, num_available, num_synced) } + /// Update the list of candidates with a new list. + /// Returns `Ok(new_list)` if the update was successful. + /// Returns `Err(some_err)` if the list is empty. 
+ pub async fn update_candidates_list( + &self, + new_list: Vec, + use_long_timeouts: bool, + ) -> Result, String> { + if new_list.is_empty() { + return Err("list cannot be empty".to_string()); + } + + let timeouts: Timeouts = if new_list.len() == 1 || use_long_timeouts { + Timeouts::set_all(Duration::from_secs(self.spec.seconds_per_slot)) + } else { + Timeouts::use_optimized_timeouts(Duration::from_secs(self.spec.seconds_per_slot)) + }; + + let new_candidates: Vec = new_list + .clone() + .into_iter() + .enumerate() + .map(|(index, url)| { + CandidateBeaconNode::new(BeaconNodeHttpClient::new(url, timeouts.clone()), index) + }) + .collect(); + + let mut candidates = self.candidates.write().await; + *candidates = new_candidates; + + Ok(new_list) + } + /// Loop through ALL candidates in `self.candidates` and update their sync status. /// /// It is possible for a node to return an unsynced status while continuing to serve diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index aebe179567..d5de24229c 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -22,6 +22,7 @@ use account_utils::{ }; pub use api_secret::ApiSecret; use beacon_node_fallback::CandidateInfo; +use core::convert::Infallible; use create_validator::{ create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, }; @@ -30,7 +31,7 @@ use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{ self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, PublicKey, - PublicKeyBytes, SetGraffitiRequest, + PublicKeyBytes, SetGraffitiRequest, UpdateCandidatesRequest, UpdateCandidatesResponse, }, }; use health_metrics::observe::Observe; @@ -38,6 +39,7 @@ use lighthouse_version::version_with_platform; use logging::crit; use logging::SSELoggingComponents; use parking_lot::RwLock; +use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use 
slot_clock::SlotClock; use std::collections::HashMap; @@ -53,7 +55,8 @@ use tracing::{info, warn}; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; use validator_services::block_service::BlockService; -use warp::{sse::Event, Filter}; +use warp::{reply::Response, sse::Event, Filter}; +use warp_utils::reject::convert_rejection; use warp_utils::task::blocking_json_task; #[derive(Debug)] @@ -102,6 +105,7 @@ pub struct Config { pub allow_keystore_export: bool, pub store_passwords_in_secrets_dir: bool, pub http_token_path: PathBuf, + pub bn_long_timeouts: bool, } impl Default for Config { @@ -121,6 +125,7 @@ impl Default for Config { allow_keystore_export: false, store_passwords_in_secrets_dir: false, http_token_path, + bn_long_timeouts: false, } } } @@ -147,6 +152,7 @@ pub fn serve( let config = &ctx.config; let allow_keystore_export = config.allow_keystore_export; let store_passwords_in_secrets_dir = config.store_passwords_in_secrets_dir; + let use_long_timeouts = config.bn_long_timeouts; // Configure CORS. 
let cors_builder = { @@ -839,6 +845,59 @@ pub fn serve( }) }); + // POST /lighthouse/beacon/update + let post_lighthouse_beacon_update = warp::path("lighthouse") + .and(warp::path("beacon")) + .and(warp::path("update")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(block_service_filter.clone()) + .then( + move |request: UpdateCandidatesRequest, + block_service: BlockService, T>| async move { + async fn parse_urls(urls: &[String]) -> Result, Response> { + match urls + .iter() + .map(|url| SensitiveUrl::parse(url).map_err(|e| e.to_string())) + .collect() + { + Ok(sensitive_urls) => Ok(sensitive_urls), + Err(_) => Err(convert_rejection::(Err( + warp_utils::reject::custom_bad_request( + "one or more urls could not be parsed".to_string(), + ), + )) + .await), + } + } + + let beacons: Vec = match parse_urls(&request.beacon_nodes).await { + Ok(new_beacons) => { + match block_service + .beacon_nodes + .update_candidates_list(new_beacons, use_long_timeouts) + .await + { + Ok(beacons) => beacons, + Err(e) => { + return convert_rejection::(Err( + warp_utils::reject::custom_bad_request(e.to_string()), + )) + .await + } + } + } + Err(e) => return e, + }; + + let response: UpdateCandidatesResponse = UpdateCandidatesResponse { + new_beacon_nodes_list: beacons.iter().map(|surl| surl.to_string()).collect(), + }; + + blocking_json_task(move || Ok(api_types::GenericResponse::from(response))).await + }, + ); + // Standard key-manager endpoints. 
let eth_v1 = warp::path("eth").and(warp::path("v1")); let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end()); @@ -1316,6 +1375,7 @@ pub fn serve( .or(post_std_keystores) .or(post_std_remotekeys) .or(post_graffiti) + .or(post_lighthouse_beacon_update) .recover(warp_utils::reject::handle_rejection), )) .or(warp::patch() diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 08447a82ce..8c23f79fd3 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -173,6 +173,7 @@ impl ApiTester { allow_keystore_export: true, store_passwords_in_secrets_dir: false, http_token_path: tempdir().unwrap().path().join(PK_FILENAME), + bn_long_timeouts: false, } } diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index 4b1a3c0059..7d421cd7d5 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -126,6 +126,7 @@ impl ApiTester { allow_keystore_export: true, store_passwords_in_secrets_dir: false, http_token_path: token_path, + bn_long_timeouts: false, }, sse_logging_components: None, slot_clock: slot_clock.clone(), diff --git a/validator_client/initialized_validators/src/lib.rs b/validator_client/initialized_validators/src/lib.rs index cbc1287a85..957430fa57 100644 --- a/validator_client/initialized_validators/src/lib.rs +++ b/validator_client/initialized_validators/src/lib.rs @@ -159,7 +159,7 @@ pub struct InitializedValidator { impl InitializedValidator { /// Return a reference to this validator's lockfile if it has one. 
- pub fn keystore_lockfile(&self) -> Option> { + pub fn keystore_lockfile(&self) -> Option> { match self.signing_method.as_ref() { SigningMethod::LocalKeystore { ref voting_keystore_lockfile, diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 579a2198c5..a6155320bd 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -54,24 +54,6 @@ const RETRY_DELAY: Duration = Duration::from_secs(2); /// The time between polls when waiting for genesis. const WAITING_FOR_GENESIS_POLL_TIME: Duration = Duration::from_secs(12); -/// Specific timeout constants for HTTP requests involved in different validator duties. -/// This can help ensure that proper endpoint fallback occurs. -const HTTP_ATTESTATION_TIMEOUT_QUOTIENT: u32 = 4; -const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; -const HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT: u32 = 24; -const HTTP_ATTESTATION_AGGREGATOR_TIMEOUT_QUOTIENT: u32 = 24; // For DVT involving middleware only -const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4; -const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; -const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; -const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4; -const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; -const HTTP_SYNC_AGGREGATOR_TIMEOUT_QUOTIENT: u32 = 24; // For DVT involving middleware only -const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; -const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; -const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; -const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; -const HTTP_DEFAULT_TIMEOUT_QUOTIENT: u32 = 4; - const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; /// Compute attestation selection proofs this many slots before they are required. 
@@ -105,7 +87,6 @@ pub struct ProductionValidatorClient { slot_clock: SystemTimeSlotClock, http_api_listen_addr: Option, config: Config, - beacon_nodes: Arc>, genesis_time: u64, } @@ -310,27 +291,7 @@ impl ProductionValidatorClient { // Use quicker timeouts if a fallback beacon node exists. let timeouts = if i < last_beacon_node_index && !config.use_long_timeouts { info!("Fallback endpoints are available, using optimized timeouts."); - Timeouts { - attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, - attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, - attestation_subscriptions: slot_duration - / HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT, - attestation_aggregators: slot_duration - / HTTP_ATTESTATION_AGGREGATOR_TIMEOUT_QUOTIENT, - liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, - proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, - proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, - sync_committee_contribution: slot_duration - / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, - sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, - sync_aggregators: slot_duration / HTTP_SYNC_AGGREGATOR_TIMEOUT_QUOTIENT, - get_beacon_blocks_ssz: slot_duration - / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, - get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, - get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, - get_validator_block: slot_duration / HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT, - default: slot_duration / HTTP_DEFAULT_TIMEOUT_QUOTIENT, - } + Timeouts::use_optimized_timeouts(slot_duration) } else { Timeouts::set_all(slot_duration.saturating_mul(config.long_timeouts_multiplier)) }; @@ -574,7 +535,6 @@ impl ProductionValidatorClient { slot_clock, http_api_listen_addr: None, genesis_time, - beacon_nodes, }) } @@ -620,7 +580,7 @@ impl ProductionValidatorClient { }; // Wait until genesis has occurred. 
- wait_for_genesis(&self.beacon_nodes, self.genesis_time).await?; + wait_for_genesis(self.genesis_time).await?; duties_service::start_update_service(self.duties_service.clone(), block_service_tx); @@ -761,10 +721,7 @@ async fn init_from_beacon_node( Ok((genesis.genesis_time, genesis.genesis_validators_root)) } -async fn wait_for_genesis( - beacon_nodes: &BeaconNodeFallback, - genesis_time: u64, -) -> Result<(), String> { +async fn wait_for_genesis(genesis_time: u64) -> Result<(), String> { let now = SystemTime::now() .duration_since(UNIX_EPOCH) .map_err(|e| format!("Unable to read system time: {:?}", e))?; @@ -784,7 +741,7 @@ async fn wait_for_genesis( // Start polling the node for pre-genesis information, cancelling the polling as soon as the // timer runs out. tokio::select! { - result = poll_whilst_waiting_for_genesis(beacon_nodes, genesis_time) => result?, + result = poll_whilst_waiting_for_genesis(genesis_time) => result?, () = sleep(genesis_time - now) => () }; @@ -804,46 +761,20 @@ async fn wait_for_genesis( /// Request the version from the node, looping back and trying again on failure. Exit once the node /// has been contacted. 
-async fn poll_whilst_waiting_for_genesis( - beacon_nodes: &BeaconNodeFallback, - genesis_time: Duration, -) -> Result<(), String> { +async fn poll_whilst_waiting_for_genesis(genesis_time: Duration) -> Result<(), String> { loop { - match beacon_nodes - .first_success(|beacon_node| async move { beacon_node.get_lighthouse_staking().await }) - .await - { - Ok(is_staking) => { - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to read system time: {:?}", e))?; + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {:?}", e))?; - if !is_staking { - error!( - msg = "this will caused missed duties", - info = "see the --staking CLI flag on the beacon node", - "Staking is disabled for beacon node" - ); - } - - if now < genesis_time { - info!( - bn_staking_enabled = is_staking, - seconds_to_wait = (genesis_time - now).as_secs(), - "Waiting for genesis" - ); - } else { - break Ok(()); - } - } - Err(e) => { - error!( - error = %e, - "Error polling beacon node" - ); - } + if now < genesis_time { + info!( + seconds_to_wait = (genesis_time - now).as_secs(), + "Waiting for genesis" + ); + } else { + break Ok(()); } - sleep(WAITING_FOR_GENESIS_POLL_TIME).await; } } diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index f776567706..e4063cd211 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -1,6 +1,5 @@ use crate::duties_service::{DutiesService, DutyAndProof}; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; -use either::Either; use futures::future::join_all; use logging::crit; use slot_clock::SlotClock; @@ -461,40 +460,32 @@ impl AttestationService Some(a), - Err(e) => { - // This shouldn't happen unless BN and VC are out of sync with - // respect to the Electra fork. 
- error!( - error = ?e, - committee_index = attestation_data.index, - slot = slot.as_u64(), - "type" = "unaggregated", - "Unable to convert to SingleAttestation" - ); - None - } - } - }) - .collect::>(); - beacon_node - .post_beacon_pool_attestations_v2::( - Either::Right(single_attestations), - fork_name, - ) - .await - } else { - beacon_node - .post_beacon_pool_attestations_v1(attestations) - .await - } + let single_attestations = attestations + .iter() + .zip(validator_indices) + .filter_map(|(a, i)| { + match a.to_single_attestation_with_attester_index(*i) { + Ok(a) => Some(a), + Err(e) => { + // This shouldn't happen unless BN and VC are out of sync with + // respect to the Electra fork. + error!( + error = ?e, + committee_index = attestation_data.index, + slot = slot.as_u64(), + "type" = "unaggregated", + "Unable to convert to SingleAttestation" + ); + None + } + } + }) + .collect::>(); + + beacon_node + .post_beacon_pool_attestations_v2::(single_attestations, fork_name) + .await }) .await { diff --git a/validator_client/validator_services/src/sync.rs b/validator_client/validator_services/src/sync.rs index a6e9f4549a..4b3f3d9fb3 100644 --- a/validator_client/validator_services/src/sync.rs +++ b/validator_client/validator_services/src/sync.rs @@ -595,8 +595,12 @@ pub async fn fill_in_aggregation_proofs, } -/// The structure generated by the `staking-deposit-cli` which has become a quasi-standard for +/// The structure generated by the `ethstaker-deposit-cli` which has become a quasi-standard for /// browser-based deposit submission tools (e.g., the Ethereum Launchpad and Lido). 
/// /// We assume this code as the canonical definition: /// -/// https://github.com/ethereum/staking-deposit-cli/blob/76ed78224fdfe3daca788d12442b3d1a37978296/staking_deposit/credentials.py#L131-L144 +/// https://github.com/eth-educators/ethstaker-deposit-cli/blob/80d536374de838ccae142974ed0e747b46beb030/ethstaker_deposit/credentials.py#L164-L177 #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct StandardDepositDataJson { #[serde(with = "public_key_bytes_without_0x_prefix")] diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index 07578033cd..3216417c73 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -43,7 +43,7 @@ pub fn cli_app() -> Command { contains all the validator keystores and other validator data. This file can then \ be imported to a validator client using the \"import-validators\" command. \ Another, optional JSON file is created which contains a list of validator \ - deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", + deposits in the same format as the \"ethstaker-deposit-cli\" tool.", ) .arg( Arg::new(OUTPUT_PATH_FLAG) @@ -487,7 +487,7 @@ impl ValidatorsAndDeposits { }; // Create a JSON structure equivalent to the one generated by - // `ethereum/staking-deposit-cli`. + // `ethstaker-deposit-cli`. let json_deposit = StandardDepositDataJson::new( &voting_keypair, withdrawal_credentials.into(), @@ -596,7 +596,7 @@ pub mod tests { type E = MainnetEthSpec; - const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "2.7.0"; + const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "1.2.2"; // Update to ethstaker-deposit-cli version fn junk_execution_address() -> Option
{ Some(Address::from_str("0x0f51bb10119727a7e5ea3538074fb341f56b09ad").unwrap()) @@ -882,7 +882,7 @@ pub mod tests { } #[tokio::test] - async fn staking_deposit_cli_vectors() { + async fn ethstaker_deposit_cli_vectors() { let vectors_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("test_vectors") .join("vectors"); diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 63c7ca4596..6cfbf7b54e 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -55,7 +55,7 @@ pub fn cli_app() -> Command { .help( "The path to a keystore JSON file to be \ imported to the validator client. This file is usually created \ - using staking-deposit-cli or ethstaker-deposit-cli", + using ethstaker-deposit-cli", ) .action(ArgAction::Set) .display_order(0) diff --git a/validator_manager/test_vectors/generate.py b/validator_manager/test_vectors/generate.py index 8bf7f5f52d..4f584bd876 100644 --- a/validator_manager/test_vectors/generate.py +++ b/validator_manager/test_vectors/generate.py @@ -1,4 +1,4 @@ -# This script uses the `ethereum/staking-deposit-cli` tool to generate +# This script uses the `ethstaker-deposit-cli` tool to generate # deposit data files which are then used for testing by Lighthouse. # # To generate vectors, run this Python script: @@ -6,7 +6,7 @@ # `python generate.py` # # This script was last run on Linux using Python v3.10.4. Python v3.11.0 was not working at time -# of writing due to dependency issues in `staking-deposit-cli`. You should probably use `pyenv` and +# of writing due to dependency issues in `ethstaker-deposit-cli`. You should probably use `pyenv` and # `virtualenv`. 
import os import sys @@ -23,7 +23,7 @@ WALLET_NAME="test_wallet" tmp_dir = os.path.join(".", "tmp") mnemonic_path = os.path.join(tmp_dir, "mnemonic.txt") sdc_dir = os.path.join(tmp_dir, "sdc") -sdc_git_dir = os.path.join(sdc_dir, "staking-deposit-cli") +sdc_git_dir = os.path.join(sdc_dir, "ethstaker-deposit-cli") vectors_dir = os.path.join(".", "vectors") @@ -59,7 +59,7 @@ def setup_sdc(): "git", "clone", "--single-branch", - "https://github.com/ethereum/staking-deposit-cli.git", + "https://github.com/eth-educators/ethstaker-deposit-cli.git", str(sdc_git_dir) ]) assert(result.returncode == 0) @@ -71,9 +71,9 @@ def setup_sdc(): ], cwd=sdc_git_dir) assert(result.returncode == 0) result = subprocess.run([ - "python", - "setup.py", + "pip", "install", + ".", ], cwd=sdc_git_dir) assert(result.returncode == 0) @@ -100,7 +100,9 @@ def sdc_generate(network, first_index, count, eth1_withdrawal_address=None): '--num_validators', str(count), '--mnemonic', TEST_MNEMONIC, '--chain', network, - '--keystore_password', 'MyPassword', + '--keystore_password', 'MyPassword1234', # minimum 12 characters for password + '--withdrawal_address', '', # no withdrawal address set so it maintains 0x00 withdrawal credentials + '--regular-withdrawal', # no compounding '--folder', os.path.abspath(output_dir), ] + eth1_flags diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1748939223.json similarity index 90% rename from validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json rename to validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1748939223.json index 6b343d087a..b2c6085197 100644 --- 
a/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1748939223.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "846c83b1ec80038974ded0ef5b89d86c862a7bd4559c10528cd4bb6a48e71987f17a963bc6165a6f51c8b87474e64b450b549ce2d14a25bea3c86c241f3740f3d3edc3dc36fddbeadb1ec8969d7193da602270fea8dd31d3e64674aa2090b73d", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "cdfe14518026e99b9dfa8a029054349e37d4632ee2bbed7c2f5af19a01912368", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "846c83b1ec80038974ded0ef5b89d86c862a7bd4559c10528cd4bb6a48e71987f17a963bc6165a6f51c8b87474e64b450b549ce2d14a25bea3c86c241f3740f3d3edc3dc36fddbeadb1ec8969d7193da602270fea8dd31d3e64674aa2090b73d", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "cdfe14518026e99b9dfa8a029054349e37d4632ee2bbed7c2f5af19a01912368", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584114.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1748939227.json similarity index 90% 
rename from validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584114.json rename to validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1748939227.json index f70410746b..e12b813e3c 100644 --- a/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584114.json +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1748939227.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "846c83b1ec80038974ded0ef5b89d86c862a7bd4559c10528cd4bb6a48e71987f17a963bc6165a6f51c8b87474e64b450b549ce2d14a25bea3c86c241f3740f3d3edc3dc36fddbeadb1ec8969d7193da602270fea8dd31d3e64674aa2090b73d", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "cdfe14518026e99b9dfa8a029054349e37d4632ee2bbed7c2f5af19a01912368", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "997cff67c1675ecd2467ac050850ddec8b0488995abf363cee40cbe1461043acf4e68422e9731340437d566542e010cd186031dc0de30b2f56d19f3bb866e0fa9be31dd49ea27777f25ad786cc8587fb745598e5870647b6deeaab77fba4a9e4", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "8787f86d699426783983d03945a8ebe45b349118d28e8af528b9695887f98fac", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file 
+[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "846c83b1ec80038974ded0ef5b89d86c862a7bd4559c10528cd4bb6a48e71987f17a963bc6165a6f51c8b87474e64b450b549ce2d14a25bea3c86c241f3740f3d3edc3dc36fddbeadb1ec8969d7193da602270fea8dd31d3e64674aa2090b73d", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "cdfe14518026e99b9dfa8a029054349e37d4632ee2bbed7c2f5af19a01912368", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "997cff67c1675ecd2467ac050850ddec8b0488995abf363cee40cbe1461043acf4e68422e9731340437d566542e010cd186031dc0de30b2f56d19f3bb866e0fa9be31dd49ea27777f25ad786cc8587fb745598e5870647b6deeaab77fba4a9e4", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "8787f86d699426783983d03945a8ebe45b349118d28e8af528b9695887f98fac", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1748939246.json similarity index 90% rename from validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json rename to validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1748939246.json index 9b2678651f..bdb31d8bf2 
100644 --- a/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1748939246.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8eed5bb34dec5fdee4a3e68a774143072af0ebdae26a9b24ea0601d516a5eeb18aa2ec804be3f05f8475f2e472ce91809d93b7586c3a90fc8a7bbb63ad1f762eee3df0dc0ea3d33dd8ba782e48de495b3bc76e280658c1406e11d07db659e69", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "74ead0279baa86ed7106268e4806484eaae26a8f1c42f693e4b3cb626c724b63", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "8d87cdd627ed169114c00653fd3167e2afc917010071bbbbddd60e331ed0d0d7273cb4a887efe63e7b840bac713420d907e9dac20df56e50e7346b59e3acfe56753234a34c7ab3d8c40ea00b447db005b4b780701a0a2416c4fdadbdb18bf174", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "978b04b76d0a56ff28beb8eb1859792e0967d0b51e4a31485d2078b8390954d2", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": 
"a8eed5bb34dec5fdee4a3e68a774143072af0ebdae26a9b24ea0601d516a5eeb18aa2ec804be3f05f8475f2e472ce91809d93b7586c3a90fc8a7bbb63ad1f762eee3df0dc0ea3d33dd8ba782e48de495b3bc76e280658c1406e11d07db659e69", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "74ead0279baa86ed7106268e4806484eaae26a8f1c42f693e4b3cb626c724b63", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "8d87cdd627ed169114c00653fd3167e2afc917010071bbbbddd60e331ed0d0d7273cb4a887efe63e7b840bac713420d907e9dac20df56e50e7346b59e3acfe56753234a34c7ab3d8c40ea00b447db005b4b780701a0a2416c4fdadbdb18bf174", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "978b04b76d0a56ff28beb8eb1859792e0967d0b51e4a31485d2078b8390954d2", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json b/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1748939241.json similarity index 87% rename from validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json rename to validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1748939241.json index 997260bb87..aa7b311ef9 100644 --- a/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json +++ 
b/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1748939241.json @@ -1 +1 @@ -[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "818141f1f2fdba651f6a3de4ed43c774974b6cec82b3e6c3fa00569b6b67a88c37742d0033275dc98b4bbaac875e48b416b89cebfd1fe9996e2a29c0a2c512d1cedff558420a1a2b50cf5c743a622d85d941b896b00520b3e9a3eaf1f5eff12c", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "9c9f6ed171b93a08f4e1bc46c0a7feace6466e3e213c6c2d567428c73e22e242", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b62103a32290ec8c710d48f3147895a2dddb25231c9ae38b8ca12bcaf30770a9fc632f4da6b3c5b7a43cfa6a9f096f5e13d26b2c68a42c1c86385aea268dcd2ad3cf766b3f01ee2ba19379ddae9c15830aac8acbef20accc82c734f4c40e5ffd", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "37b75d75086f4b980c85c021ca22343008d445061714cff41d63aea4dca49a5f", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "af2dc295084b4a3eff01a52fe5d42aa931509c24328d5304e59026d0957b55bc35e64802a8d64fdb4a9700bf12e1d6bb184eba01682d8413d86b737e63d3d79a16243d9c8e00115a202efc889ef7129861d8aa32bf8ec9ef5305eecce87b2eda", 
"deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "fd0c081818d2ce1bc54b7979e9b348bbbdb8fe5904694143bf4b355dcbbde692", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "818141f1f2fdba651f6a3de4ed43c774974b6cec82b3e6c3fa00569b6b67a88c37742d0033275dc98b4bbaac875e48b416b89cebfd1fe9996e2a29c0a2c512d1cedff558420a1a2b50cf5c743a622d85d941b896b00520b3e9a3eaf1f5eff12c", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "9c9f6ed171b93a08f4e1bc46c0a7feace6466e3e213c6c2d567428c73e22e242", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b62103a32290ec8c710d48f3147895a2dddb25231c9ae38b8ca12bcaf30770a9fc632f4da6b3c5b7a43cfa6a9f096f5e13d26b2c68a42c1c86385aea268dcd2ad3cf766b3f01ee2ba19379ddae9c15830aac8acbef20accc82c734f4c40e5ffd", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "37b75d75086f4b980c85c021ca22343008d445061714cff41d63aea4dca49a5f", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": 
"af2dc295084b4a3eff01a52fe5d42aa931509c24328d5304e59026d0957b55bc35e64802a8d64fdb4a9700bf12e1d6bb184eba01682d8413d86b737e63d3d79a16243d9c8e00115a202efc889ef7129861d8aa32bf8ec9ef5305eecce87b2eda", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "fd0c081818d2ce1bc54b7979e9b348bbbdb8fe5904694143bf4b355dcbbde692", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json b/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1748939232.json similarity index 90% rename from validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json rename to validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1748939232.json index 4fa3724c59..344bc8e5c0 100644 --- a/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json +++ b/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1748939232.json @@ -1 +1 @@ -[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "b687aa7d55752f00a060c21fa9287485bab94c841d96b3516263fb384a812c92e60ef9fa2e09add9f55db71961fc051e0bb83d214b6f31d04ee59eaba3b43e27eadd2a64884c5d4125a1f5bd6e1d930e5a1e420c278c697d4af6ed3fcdac16cf", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "54dc56d2838ca70bac89ca92ae1f8d04945d3305ce8507b390756b646163387a", "fork_version": "01017000", "network_name": "holesky", 
"deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "b687aa7d55752f00a060c21fa9287485bab94c841d96b3516263fb384a812c92e60ef9fa2e09add9f55db71961fc051e0bb83d214b6f31d04ee59eaba3b43e27eadd2a64884c5d4125a1f5bd6e1d930e5a1e420c278c697d4af6ed3fcdac16cf", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "54dc56d2838ca70bac89ca92ae1f8d04945d3305ce8507b390756b646163387a", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json b/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1748939236.json similarity index 90% rename from validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json rename to validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1748939236.json index 7436b53f24..9dffddd89a 100644 --- a/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json +++ b/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1748939236.json @@ -1 +1 @@ -[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": 
"a59a2c510c5ce378b514f62550a7115cd6cfebaf73a5ba20c2cf21456a2d2c11d6e117b91d23743fc0361794cf7e5405030eb296926b526e8a2d68aa87569358e69d3884563a23770714730b6fab6ba639977d725a5ed4f29abe3ccc34575610", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "149a5dfbba87109dac65142cc067aed97c9579730488cfe16625be3ce4f753a6", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "966ae45b81402f1155ff313e48ca3a5346264dcc4bc9ee9e69994ee74368852d9d27c1684752735feba6c21042ad366b13f12c6e772c453518900435d87e2d743e1818e7471cf3574598e3b085c4527f643efe679841ddf8a480cac12b2c6e08", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "f44dac412ae36929a84f64d5f7f91cada908a8f9e837fc70628f58804591798d", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "a59a2c510c5ce378b514f62550a7115cd6cfebaf73a5ba20c2cf21456a2d2c11d6e117b91d23743fc0361794cf7e5405030eb296926b526e8a2d68aa87569358e69d3884563a23770714730b6fab6ba639977d725a5ed4f29abe3ccc34575610", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "149a5dfbba87109dac65142cc067aed97c9579730488cfe16625be3ce4f753a6", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}, {"pubkey": 
"a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "966ae45b81402f1155ff313e48ca3a5346264dcc4bc9ee9e69994ee74368852d9d27c1684752735feba6c21042ad366b13f12c6e772c453518900435d87e2d743e1818e7471cf3574598e3b085c4527f643efe679841ddf8a480cac12b2c6e08", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "f44dac412ae36929a84f64d5f7f91cada908a8f9e837fc70628f58804591798d", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1748939195.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1748939195.json index d9ba926d1c..f8005651aa 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1748939195.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", 
"deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1748939200.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1748939200.json index f1ea4c6ad3..a8b1a056c4 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1748939200.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": 
"0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", 
"deposit_cli_version": "1.2.2"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1748939218.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1748939218.json index 5741f23d8f..c3c25e9854 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1748939218.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": 
"a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}, {"pubkey": 
"a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1748939214.json similarity index 87% rename from validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json rename to validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1748939214.json index 9b9556cf9d..6bb47f5280 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1748939214.json @@ -1 +1 @@ -[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": 
"a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": 
"92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": 
"34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1748939204.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json rename to validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1748939204.json index 84140f53fe..ec53025149 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1748939204.json @@ -1 +1 @@ -[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": 
"a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1748939209.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json rename to validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1748939209.json index 3205390a43..7374811091 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1748939209.json @@ -1 +1 @@ -[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", 
"deposit_cli_version": "2.7.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", "deposit_message_root": 
"f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "1.2.2"}] \ No newline at end of file diff --git a/wordlist.txt b/wordlist.txt index 682fae0261..3c7070c642 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -34,7 +34,7 @@ Esat's ETH EthDocker Ethereum -Ethstaker +EthStaker Exercism Extractable FFG @@ -89,6 +89,7 @@ SSD SSL SSZ Styleguide +TBD TCP Teku TLS