diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 1ca1006c1f..9223c40e15 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -25,9 +25,23 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install ganache - run: npm install ganache@latest --global - + - name: Install geth (ubuntu) + if: matrix.os == 'ubuntu-22.04' + run: | + sudo add-apt-repository -y ppa:ethereum/ethereum + sudo apt-get update + sudo apt-get install ethereum + - name: Install geth (mac) + if: matrix.os == 'macos-12' + run: | + brew tap ethereum/ethereum + brew install ethereum + - name: Install GNU sed & GNU grep + if: matrix.os == 'macos-12' + run: | + brew install gnu-sed grep + echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH + echo "$(brew --prefix)/opt/grep/libexec/gnubin" >> $GITHUB_PATH # https://github.com/actions/cache/blob/main/examples.md#rust---cargo - uses: actions/cache@v3 id: cache-cargo @@ -44,7 +58,7 @@ jobs: run: make && make install-lcli - name: Start local testnet - run: ./start_local_testnet.sh && sleep 60 + run: ./start_local_testnet.sh genesis.json && sleep 60 working-directory: scripts/local_testnet - name: Print logs @@ -60,7 +74,7 @@ jobs: working-directory: scripts/local_testnet - name: Start local testnet with blinded block production - run: ./start_local_testnet.sh -p && sleep 60 + run: ./start_local_testnet.sh -p genesis.json && sleep 60 working-directory: scripts/local_testnet - name: Print logs for blinded block testnet @@ -69,4 +83,4 @@ jobs: - name: Stop local testnet with blinded block production run: ./stop_local_testnet.sh - working-directory: scripts/local_testnet + working-directory: scripts/local_testnet \ No newline at end of file diff --git a/.github/workflows/publish-crate.yml b/.github/workflows/publish-crate.yml deleted file mode 100644 index 736057f785..0000000000 --- 
a/.github/workflows/publish-crate.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: Publish Crate - -on: - push: - tags: - - tree-hash-v* - - tree-hash-derive-v* - - eth2-ssz-v* - - eth2-ssz-derive-v* - - eth2-ssz-types-v* - - eth2-serde-util-v* - - eth2-hashing-v* - -env: - CARGO_API_TOKEN: ${{ secrets.CARGO_API_TOKEN }} - -jobs: - extract-tag: - runs-on: ubuntu-latest - steps: - - name: Extract tag - run: echo "TAG=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT - id: extract_tag - outputs: - TAG: ${{ steps.extract_tag.outputs.TAG }} - - publish-crate: - runs-on: ubuntu-latest - needs: [extract-tag] - env: - TAG: ${{ needs.extract-tag.outputs.TAG }} - steps: - - uses: actions/checkout@v3 - - name: Update Rust - run: rustup update stable - - name: Cargo login - run: | - echo "${CARGO_API_TOKEN}" | cargo login - - name: publish eth2 ssz derive - if: startsWith(env.TAG, 'eth2-ssz-derive-v') - run: | - ./scripts/ci/publish.sh consensus/ssz_derive eth2_ssz_derive "$TAG" - - name: publish eth2 ssz - if: startsWith(env.TAG, 'eth2-ssz-v') - run: | - ./scripts/ci/publish.sh consensus/ssz eth2_ssz "$TAG" - - name: publish eth2 hashing - if: startsWith(env.TAG, 'eth2-hashing-v') - run: | - ./scripts/ci/publish.sh crypto/eth2_hashing eth2_hashing "$TAG" - - name: publish tree hash derive - if: startsWith(env.TAG, 'tree-hash-derive-v') - run: | - ./scripts/ci/publish.sh consensus/tree_hash_derive tree_hash_derive "$TAG" - - name: publish tree hash - if: startsWith(env.TAG, 'tree-hash-v') - run: | - ./scripts/ci/publish.sh consensus/tree_hash tree_hash "$TAG" - - name: publish ssz types - if: startsWith(env.TAG, 'eth2-ssz-types-v') - run: | - ./scripts/ci/publish.sh consensus/ssz_types eth2_ssz_types "$TAG" - - name: publish serde util - if: startsWith(env.TAG, 'eth2-serde-util-v') - run: | - ./scripts/ci/publish.sh consensus/serde_utils eth2_serde_utils "$TAG" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e6d79bd5ef..8142184415 100644 --- 
a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -134,17 +134,11 @@ jobs: - name: Build Lighthouse for Windows portable if: matrix.arch == 'x86_64-windows-portable' - # NOTE: profile set to release until this rustc issue is fixed: - # - # https://github.com/rust-lang/rust/issues/107781 - # - # tracked at: https://github.com/sigp/lighthouse/issues/3964 - run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile release + run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - name: Build Lighthouse for Windows modern if: matrix.arch == 'x86_64-windows' - # NOTE: profile set to release (see above) - run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile release + run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }} - name: Configure GPG and create artifacts if: startsWith(matrix.arch, 'x86_64-windows') != true diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 27c91f2262..e6b75ea7b1 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -58,8 +58,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install ganache - run: sudo npm install -g ganache + - name: Install anvil + run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil - name: Run tests in release run: make test-release release-tests-windows: @@ -78,8 +78,9 @@ jobs: run: | choco install python protoc visualstudio2019-workload-vctools -y npm config set msvs_version 2019 - - name: Install ganache - run: npm install -g ganache --loglevel verbose + - name: Install anvil + # Extra feature to work around https://github.com/foundry-rs/foundry/issues/5115 + run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil 
--features ethers/ipc - name: Install make run: choco install -y make - uses: KyleMayes/install-llvm-action@v1 @@ -140,8 +141,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install ganache - run: sudo npm install -g ganache + - name: Install anvil + run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil - name: Run tests in debug run: make test-debug state-transition-vectors-ubuntu: @@ -196,8 +197,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install ganache - run: sudo npm install -g ganache + - name: Install anvil + run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil - name: Run the beacon chain sim that starts from an eth1 contract run: cargo run --release --bin simulator eth1-sim merge-transition-ubuntu: @@ -212,8 +213,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install ganache - run: sudo npm install -g ganache + - name: Install anvil + run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil - name: Run the beacon chain sim and go through the merge transition run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: @@ -228,8 +229,6 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install ganache - run: sudo npm install -g ganache - name: Run the beacon chain sim without an eth1 connection run: cargo run --release --bin simulator no-eth1-sim syncing-simulator-ubuntu: @@ -244,8 +243,8 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install ganache - run: sudo npm install -g ganache + - name: Install anvil + run: cargo install --git 
https://github.com/foundry-rs/foundry --locked anvil - name: Run the syncing simulator run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: @@ -260,20 +259,23 @@ jobs: uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install ganache - run: sudo npm install -g ganache + - name: Install geth + run: | + sudo add-apt-repository -y ppa:ethereum/ethereum + sudo apt-get update + sudo apt-get install ethereum - name: Install lighthouse and lcli run: | make make install-lcli - - name: Run the doppelganger protection success test script - run: | - cd scripts/tests - ./doppelganger_protection.sh success - name: Run the doppelganger protection failure test script run: | cd scripts/tests - ./doppelganger_protection.sh failure + ./doppelganger_protection.sh failure genesis.json + - name: Run the doppelganger protection success test script + run: | + cd scripts/tests + ./doppelganger_protection.sh success genesis.json execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 901c2e8aff..5e6deea1d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,16 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + [[package]] name = "account_manager" version = "0.3.5" @@ -151,9 +161,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" +checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" dependencies = [ "aead 0.5.2", "aes 0.8.2", @@ -189,16 +199,27 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", + "once_cell", + "version_check", +] + +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" dependencies = [ "memchr", ] @@ -228,9 +249,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.70" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "arbitrary" @@ -271,7 +292,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - 
"time 0.3.20", + "time 0.3.21", ] [[package]] @@ -287,7 +308,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -327,9 +348,9 @@ dependencies = [ [[package]] name = "asn1_der" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" [[package]] name = "async-io" @@ -362,9 +383,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -373,13 +394,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] @@ -390,7 +411,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -419,9 +440,9 @@ dependencies = [ [[package]] name = "atomic-waker" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" +checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "attohttpc" @@ -448,9 +469,9 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.0.1" +version = 
"1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a8c1df849285fbacd587de7818cc7d13be6cd2cbcd47a04fb1801b0e2706e33" +checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", "proc-macro2", @@ -530,7 +551,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.6.2", "object", "rustc-demangle", ] @@ -547,6 +568,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.1" @@ -555,9 +582,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "3f1e31e207a6b8fb791a38ea3105e6cb541f55e4d029902d3039a4ad07cc4105" [[package]] name = "base64ct" @@ -636,13 +663,14 @@ dependencies = [ "tokio", "tokio-stream", "tree_hash", + "tree_hash_derive", "types", "unused_port", ] [[package]] name = "beacon_node" -version = "4.1.0" +version = "4.3.0" dependencies = [ "beacon_chain", "clap", @@ -738,7 +766,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -821,7 +849,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "4.1.0" +version = "4.3.0" dependencies = [ "beacon_node", "clap", @@ -875,9 +903,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.12.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" [[package]] name = "byte-slice-cast" @@ -936,6 +964,38 @@ dependencies = [ "tree_hash", ] +[[package]] +name = "camino" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.17", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "cast" version = "0.3.0" @@ -1120,7 +1180,7 @@ dependencies = [ "state_processing", "store", "task_executor", - "time 0.3.20", + "time 0.3.21", "timer", "tokio", "types", @@ -1135,16 +1195,6 @@ dependencies = [ "cc", ] -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "compare_fields" version = "0.2.0" @@ -1162,9 +1212,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" 
dependencies = [ "crossbeam-utils", ] @@ -1208,9 +1258,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" +checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] @@ -1277,9 +1327,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1336,6 +1386,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "crypto-bigint" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1408,12 +1470,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.5" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" +checksum = "04d778600249295e82b6ab12e291ed9029407efee0cfb7baf67157edc65964df" dependencies = [ "nix 0.26.2", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -1436,6 +1498,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" dependencies = [ "cfg-if", + "digest 0.10.7", "fiat-crypto", "packed_simd_2", "platforms 3.0.2", @@ -1443,50 +1506,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "cxx" -version = "1.0.94" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn 2.0.13", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.13", -] - [[package]] name = "darling" version = "0.13.4" @@ -1579,15 +1598,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "data-encoding-macro" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" +checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1595,9 +1614,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" +checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1634,7 +1653,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" dependencies = [ "futures", - "tokio-util 0.7.7", + "tokio-util 0.7.8", ] [[package]] @@ -1662,6 +1681,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56acb310e15652100da43d130af8d97b509e95af61aab1c5a7939ef24337ee17" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "der-parser" version = "7.0.0" @@ -1757,9 +1786,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4391a22b19c916e50bec4d6140f29bdda3e3bb187223fe6e3ea0b6e4d1021c04" +checksum = "72eb77396836a4505da85bae0712fa324b74acfe1876d7c2f7e694ef3d0ee373" dependencies = [ "bitflags", "byteorder", @@ -1803,11 +1832,12 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid", "crypto-common", "subtle", ] @@ -1864,15 +1894,15 @@ dependencies = [ [[package]] name = "discv5" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b009a99b85b58900df46435307fc5c4c845af7e182582b1fbf869572fa9fce69" +checksum = "77f32d27968ba86689e3f0eccba0383414348a6fc5918b0a639c98dd81e20ed6" dependencies = [ "aes 0.7.5", "aes-gcm 0.9.4", "arrayvec", "delay_map", - "enr 0.7.0", + 
"enr 0.8.1", "fnv", "futures", "hashlink 0.7.0", @@ -1888,8 +1918,6 @@ dependencies = [ "smallvec", "socket2 0.4.9", "tokio", - "tokio-stream", - "tokio-util 0.6.10", "tracing", "tracing-subscriber", "uint", @@ -1898,13 +1926,13 @@ dependencies = [ [[package]] name = "displaydoc" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] @@ -1913,16 +1941,36 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65d09067bfacaa79114679b279d7f5885b53295b1e2cfb4e79c8e4bd3d633169" +[[package]] +name = "dunce" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" + [[package]] name = "ecdsa" version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der", - "elliptic-curve", - "rfc6979", - "signature", + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", +] + +[[package]] +name = "ecdsa" +version = "0.16.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0997c976637b606099b9985693efa3581e84e41f5c11ba5255f88711058ad428" +dependencies = [ + "der 0.7.6", + "digest 0.10.7", + "elliptic-curve 0.13.5", + "rfc6979 0.4.0", + "signature 2.1.0", + "spki 0.7.2", ] [[package]] @@ -1931,7 +1979,17 @@ version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ - "signature", + "signature 1.6.4", +] + +[[package]] +name = "ed25519" +version = "2.2.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963" +dependencies = [ + "pkcs8 0.10.2", + "signature 2.1.0", ] [[package]] @@ -1941,13 +1999,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek 3.2.0", - "ed25519", + "ed25519 1.5.3", "rand 0.7.3", "serde", "sha2 0.9.9", "zeroize", ] +[[package]] +name = "ed25519-dalek" +version = "2.0.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "798f704d128510932661a3489b08e3f4c934a01d61c5def59ae7b8e48f19665a" +dependencies = [ + "curve25519-dalek 4.0.0-rc.2", + "ed25519 2.2.1", + "rand_core 0.6.4", + "serde", + "sha2 0.10.6", + "zeroize", +] + [[package]] name = "ef_tests" version = "0.2.0" @@ -1991,18 +2063,37 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ - "base16ct", - "crypto-bigint", - "der", - "digest 0.10.6", - "ff", + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.7", + "ff 0.12.1", "generic-array", - "group", + "group 0.12.1", "hkdf", "pem-rfc7468", - "pkcs8", + "pkcs8 0.9.0", "rand_core 0.6.4", - "sec1", + "sec1 0.3.0", + "subtle", + "zeroize", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" +dependencies = [ + "base16ct 0.2.0", + "crypto-bigint 0.5.2", + "digest 0.10.7", + "ff 0.13.0", + "generic-array", + "group 0.13.0", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "sec1 0.7.2", "subtle", "zeroize", ] @@ -2026,32 +2117,31 @@ dependencies = [ "bs58", "bytes", "hex", - "k256", + "k256 0.11.6", "log", "rand 0.8.5", "rlp", "serde", - "sha3 0.10.6", + 
"sha3 0.10.8", "zeroize", ] [[package]] name = "enr" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "492a7e5fc2504d5fdce8e124d3e263b244a68b283cac67a69eda0cd43e0aebad" +checksum = "cf56acd72bb22d2824e66ae8e9e5ada4d0de17a69c7fd35569dde2ada8ec9116" dependencies = [ "base64 0.13.1", - "bs58", "bytes", - "ed25519-dalek", + "ed25519-dalek 2.0.0-rc.2", "hex", - "k256", + "k256 0.13.1", "log", "rand 0.8.5", "rlp", "serde", - "sha3 0.10.6", + "sha3 0.10.8", "zeroize", ] @@ -2114,13 +2204,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2172,7 +2262,6 @@ dependencies = [ "tokio", "tree_hash", "types", - "web3", ] [[package]] @@ -2180,11 +2269,14 @@ name = "eth1_test_rig" version = "0.2.0" dependencies = [ "deposit_contract", + "ethers-contract", + "ethers-core", + "ethers-providers", + "hex", "serde_json", "tokio", "types", "unused_port", - "web3", ] [[package]] @@ -2201,7 +2293,9 @@ dependencies = [ "futures-util", "libsecp256k1", "lighthouse_network", + "mediatype", "mime", + "pretty_reqwest_error", "procinfo", "proto_array", "psutil", @@ -2212,6 +2306,7 @@ dependencies = [ "serde_json", "slashing_protection", "store", + "tokio", "types", ] @@ -2337,7 +2432,7 @@ dependencies = [ "regex", "serde", "serde_json", - "sha3 0.10.6", + "sha3 0.10.8", "thiserror", "uint", ] @@ -2471,6 +2566,65 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ethers-contract" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9c3c3e119a89f0a9a1e539e7faecea815f74ddcf7c90d0b00d1f524db2fdc9c" +dependencies = [ + 
"ethers-contract-abigen", + "ethers-contract-derive", + "ethers-core", + "ethers-providers", + "futures-util", + "hex", + "once_cell", + "pin-project", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "ethers-contract-abigen" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d4e5ad46aede34901f71afdb7bb555710ed9613d88d644245c657dc371aa228" +dependencies = [ + "Inflector", + "cfg-if", + "dunce", + "ethers-core", + "eyre", + "getrandom 0.2.9", + "hex", + "proc-macro2", + "quote", + "regex", + "reqwest", + "serde", + "serde_json", + "syn 1.0.109", + "toml", + "url", + "walkdir", +] + +[[package]] +name = "ethers-contract-derive" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f192e8e4cf2b038318aae01e94e7644e0659a76219e94bcd3203df744341d61f" +dependencies = [ + "ethers-contract-abigen", + "ethers-core", + "hex", + "proc-macro2", + "quote", + "serde_json", + "syn 1.0.109", +] + [[package]] name = "ethers-core" version = "1.0.2" @@ -2479,12 +2633,14 @@ checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" dependencies = [ "arrayvec", "bytes", + "cargo_metadata", "chrono", - "elliptic-curve", + "elliptic-curve 0.12.3", "ethabi 18.0.0", "generic-array", "hex", - "k256", + "k256 0.11.6", + "once_cell", "open-fastrlp", "rand 0.8.5", "rlp", @@ -2492,6 +2648,7 @@ dependencies = [ "serde", "serde_json", "strum", + "syn 1.0.109", "thiserror", "tiny-keccak", "unicode-xid", @@ -2511,7 +2668,7 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.8", + "getrandom 0.2.9", "hashers", "hex", "http", @@ -2590,6 +2747,7 @@ dependencies = [ "lru 0.7.8", "mev-rs", "parking_lot 0.12.1", + "pretty_reqwest_error", "rand 0.8.5", "reqwest", "sensitive_url", @@ -2623,6 +2781,16 @@ dependencies = [ "futures", ] +[[package]] +name = "eyre" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" +dependencies = [ + "indenter", + "once_cell", +] + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -2654,6 +2822,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ffi-opaque" version = "2.0.1" @@ -2717,13 +2895,13 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide", + "miniz_oxide 0.7.1", ] [[package]] @@ -2844,9 +3022,9 @@ checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", @@ -2865,7 +3043,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -2932,6 +3110,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -2970,9 +3149,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "js-sys", @@ -3041,16 +3220,27 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ - "ff", + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff 0.13.0", "rand_core 0.6.4", "subtle", ] [[package]] name = "h2" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -3061,7 +3251,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tracing", ] @@ -3092,7 +3282,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -3101,7 +3291,16 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.3", ] [[package]] @@ -3124,11 +3323,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.1" +version = "0.8.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" +checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa" dependencies = [ - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -3233,7 +3432,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -3373,9 +3572,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.25" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes", "futures-channel", @@ -3397,15 +3596,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" dependencies = [ "http", "hyper", - "rustls 0.20.8", + "rustls 0.21.1", "tokio", - "tokio-rustls 0.23.4", + "tokio-rustls 0.24.0", ] [[package]] @@ -3423,26 +3622,25 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.54" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c17cc76786e99f8d2f055c11159e7f0091c42474dcc3189fbab96072e873e6d" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows 0.46.0", + "windows 0.48.0", ] [[package]] name = 
"iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -3550,7 +3748,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.4.0", + "parity-scale-codec 3.5.0", ] [[package]] @@ -3591,6 +3789,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + [[package]] name = "indexmap" version = "1.9.3" @@ -3661,13 +3865,13 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ "hermit-abi 0.3.1", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -3736,35 +3940,20 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" dependencies = [ "wasm-bindgen", ] -[[package]] -name = "jsonrpc-core" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" -dependencies = [ - "futures", - "futures-executor", - 
"futures-util", - "log", - "serde", - "serde_derive", - "serde_json", -] - [[package]] name = "jsonwebtoken" version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", "pem", "ring", "serde", @@ -3779,17 +3968,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", "sha2 0.10.6", - "sha3 0.10.6", + "sha3 0.10.8", +] + +[[package]] +name = "k256" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" +dependencies = [ + "cfg-if", + "ecdsa 0.16.7", + "elliptic-curve 0.13.5", + "once_cell", + "sha2 0.10.6", + "signature 2.1.0", ] [[package]] name = "keccak" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" dependencies = [ "cpufeatures", ] @@ -3821,7 +4024,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.1.0" +version = "4.3.0" dependencies = [ "account_utils", "beacon_chain", @@ -3836,6 +4039,7 @@ dependencies = [ "eth2", "eth2_network_config", "eth2_wallet", + "ethereum_hashing", "ethereum_ssz", "genesis", "int_to_bytes", @@ -3843,6 +4047,7 @@ dependencies = [ "lighthouse_version", "log", "malloc_utils", + "rayon", "sensitive_url", "serde", "serde_json", @@ -3853,7 +4058,6 @@ dependencies = [ "tree_hash", "types", "validator_dir", - "web3", ] [[package]] @@ -3881,15 +4085,15 @@ dependencies = [ [[package]] name = "libc" -version = 
"0.2.140" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libflate" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97822bf791bd4d5b403713886a5fbe8bf49520fe78e323b0dc480ca1a03e50b0" +checksum = "5ff4ae71b685bbad2f2f391fe74f6b7659a34871c08b210fdc039e43bee07d18" dependencies = [ "adler32", "crc32fast", @@ -3923,9 +4127,9 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "libmdbx" @@ -3951,7 +4155,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.8", + "getrandom 0.2.9", "instant", "libp2p-core 0.38.0", "libp2p-dns", @@ -3982,7 +4186,7 @@ checksum = "b1fff5bd889c82a0aec668f2045edd066f559d4e5c40354e5a4c77ac00caac38" dependencies = [ "asn1_der", "bs58", - "ed25519-dalek", + "ed25519-dalek 1.0.1", "either", "fnv", "futures", @@ -4017,7 +4221,7 @@ checksum = "b6a8fcd392ff67af6cc3f03b1426c41f7f26b6b9aff2dc632c1c56dd649e571f" dependencies = [ "asn1_der", "bs58", - "ed25519-dalek", + "ed25519-dalek 1.0.1", "either", "fnv", "futures", @@ -4036,7 +4240,7 @@ dependencies = [ "prost-build", "rand 0.8.5", "rw-stream-sink", - "sec1", + "sec1 0.3.0", "sha2 0.10.6", "smallvec", "thiserror", @@ -4047,9 +4251,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.39.1" +version = "0.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7f8b7d65c070a5a1b5f8f0510648189da08f787b8963f8e21219e0710733af" 
+checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" dependencies = [ "either", "fnv", @@ -4140,18 +4344,18 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a8ea433ae0cea7e3315354305237b9897afe45278b2118a7a57ca744e70fd27" +checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" dependencies = [ "bs58", - "ed25519-dalek", + "ed25519-dalek 1.0.1", "log", "multiaddr 0.17.1", "multihash 0.17.0", - "prost", "quick-protobuf", "rand 0.8.5", + "sha2 0.10.6", "thiserror", "zeroize", ] @@ -4325,7 +4529,7 @@ checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.39.1", + "libp2p-core 0.39.2", "libp2p-identity", "rcgen 0.10.0", "ring", @@ -4363,7 +4567,7 @@ dependencies = [ "thiserror", "tinytemplate", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "webrtc", ] @@ -4461,9 +4665,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" dependencies = [ "cc", "pkg-config", @@ -4472,7 +4676,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.1.0" +version = "4.3.0" dependencies = [ "account_manager", "account_utils", @@ -4581,15 +4785,6 @@ dependencies = [ "target_info", ] -[[package]] -name = "link-cplusplus" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] - [[package]] name = "linked-hash-map" version = "0.5.6" @@ -4598,9 +4793,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" 
[[package]] name = "linux-raw-sys" -version = "0.3.1" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "lmdb-rkv" @@ -4654,11 +4849,18 @@ dependencies = [ name = "logging" version = "0.2.0" dependencies = [ + "chrono", "lazy_static", "lighthouse_metrics", + "parking_lot 0.12.1", + "serde", + "serde_json", "slog", + "slog-async", "slog-term", "sloggers", + "take_mut", + "tokio", ] [[package]] @@ -4755,7 +4957,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4769,6 +4971,12 @@ dependencies = [ "libc", ] +[[package]] +name = "mediatype" +version = "0.19.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea6e62614ab2fc0faa58bb15102a0382d368f896a9fa4776592589ab55c4de7" + [[package]] name = "memchr" version = "2.5.0" @@ -4909,6 +5117,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = "0.8.6" @@ -5020,7 +5237,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" dependencies = [ "core2", - "digest 0.10.6", + "digest 0.10.7", "multihash-derive", "sha2 0.10.6", "unsigned-varint 0.7.1", @@ -5033,9 +5250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ "core2", - "digest 0.10.6", 
"multihash-derive", - "sha2 0.10.6", "unsigned-varint 0.7.1", ] @@ -5311,9 +5526,9 @@ dependencies = [ [[package]] name = "ntapi" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc51db7b362b205941f71232e56c625156eb9a929f8cf74a428fd5bc094a4afc" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" dependencies = [ "winapi", ] @@ -5348,7 +5563,7 @@ dependencies = [ "autocfg 0.1.8", "byteorder", "lazy_static", - "libm 0.2.6", + "libm 0.2.7", "num-integer", "num-iter", "num-traits", @@ -5486,9 +5701,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.49" +version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d2f106ab837a24e03672c59b1239669a0596406ff657c3c0835b6b7f0f35a33" +checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ "bitflags", "cfg-if", @@ -5507,7 +5722,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -5518,18 +5733,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.2+1.1.1t" +version = "111.25.3+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320708a054ad9b3bf314688b5db87cf4d6683d64cfc835e2337924ae62bf4431" +checksum = "924757a6a226bf60da5f7dd0311a34d2b52283dd82ddeb103208ddc66362f80c" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.84" +version = "0.9.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a20eace9dc2d82904039cb76dcf50fb1a0bba071cfd1629720b5d6f1ddba0fa" +checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" dependencies = [ "cc", "libc", @@ -5574,8 +5789,8 @@ version = "0.11.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", "sha2 0.10.6", ] @@ -5585,8 +5800,8 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", "sha2 0.10.6", ] @@ -5616,9 +5831,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" +checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" dependencies = [ "arrayvec", "bitvec 1.0.1", @@ -5654,9 +5869,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "parking_lot" @@ -5800,22 +6015,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", 
"quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] @@ -5842,15 +6057,25 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der", - "spki", + "der 0.6.1", + "spki 0.6.0", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der 0.7.6", + "spki 0.7.2", ] [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "platforms" @@ -5894,9 +6119,9 @@ dependencies = [ [[package]] name = "polling" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg 1.1.0", "bitflags", @@ -5905,7 +6130,7 @@ dependencies = [ "libc", "log", "pin-project-lite 0.2.9", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -5940,7 +6165,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.5.0", + "universal-hash 0.5.1", ] [[package]] @@ -5949,7 +6174,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", "byteorder", "bytes", "fallible-iterator", @@ -5980,13 +6205,21 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "pq-sys" -version = "0.4.7" +version = "0.4.8" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b845d6d8ec554f972a2c5298aad68953fd64e7441e846075450b44656a016d1" +checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" dependencies = [ "vcpkg", ] +[[package]] +name = "pretty_reqwest_error" +version = "0.1.0" +dependencies = [ + "reqwest", + "sensitive_url", +] + [[package]] name = "prettyplease" version = "0.1.25" @@ -6066,9 +6299,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.55" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0dd4be24fcdcfeaa12a432d588dc59bbad6cad3510c67e74a2b6b2fc950564" +checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" dependencies = [ "unicode-ident", ] @@ -6125,9 +6358,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive", @@ -6135,9 +6368,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c828f93f5ca4826f97fedcbd3f9a536c16b12cff3dbbb4a007f932bbad95b12" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", @@ -6170,9 +6403,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", @@ -6183,9 +6416,9 @@ 
dependencies = [ [[package]] name = "prost-types" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ "prost", ] @@ -6298,9 +6531,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] @@ -6397,7 +6630,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -6448,7 +6681,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", "ring", - "time 0.3.20", + "time 0.3.21", "x509-parser 0.13.2", "yasna", ] @@ -6461,7 +6694,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", - "time 0.3.20", + "time 0.3.21", "yasna", ] @@ -6489,20 +6722,20 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.3" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] 
[[package]] @@ -6511,7 +6744,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] @@ -6521,12 +6754,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] -name = "reqwest" -version = "0.11.16" +name = "regex-syntax" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" + +[[package]] +name = "reqwest" +version = "0.11.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", "bytes", "encoding_rs", "futures-core", @@ -6545,15 +6784,15 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite 0.2.9", - "rustls 0.20.8", + "rustls 0.21.1", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-rustls 0.23.4", - "tokio-util 0.7.7", + "tokio-rustls 0.24.0", + "tokio-util 0.7.8", "tower-service", "url", "wasm-bindgen", @@ -6580,11 +6819,21 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.4.9", "hmac 0.12.1", "zeroize", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + [[package]] name = "ring" version = "0.16.20" @@ -6686,16 +6935,16 @@ 
dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.1", + "hashlink 0.8.2", "libsqlite3-sys", "smallvec", ] [[package]] name = "rustc-demangle" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -6738,16 +6987,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.6" +version = "0.37.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d097081ed288dfe45699b72f5b5d648e5f15d64d900c7080273baa20c16a6849" +checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -6775,13 +7024,35 @@ dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "rustls" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct 0.7.0", +] + [[package]] name = "rustls-pemfile" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", ] [[package]] @@ -6837,21 +7108,21 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.5.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" +checksum = "b569c32c806ec3abdf3b5869fb8bf1e0d275a7c1c9b0b05603d9464632649edf" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.4.0", + "parity-scale-codec 3.5.0", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" +checksum = "53012eae69e5aa5c14671942a5dd47de59d4cdcff8532a6dd0e081faf1119482" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6889,12 +7160,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" - [[package]] name = "scrypt" version = "0.7.0" @@ -6945,37 +7210,33 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "base16ct", - "der", + "base16ct 0.1.1", + "der 0.6.1", "generic-array", - "pkcs8", + "pkcs8 0.9.0", "subtle", "zeroize", ] [[package]] -name = "secp256k1" -version = "0.21.3" +name = "sec1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" +checksum = "f0aec48e813d6b90b15f0b8948af3c63483992dee44c03e9930b3eebdabe046e" dependencies = [ - "secp256k1-sys", -] - -[[package]] -name = "secp256k1-sys" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" -dependencies = [ - "cc", + "base16ct 0.2.0", + "der 0.7.6", + 
"generic-array", + "pkcs8 0.10.2", + "subtle", + "zeroize", ] [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags", "core-foundation", @@ -6986,9 +7247,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -7008,6 +7269,9 @@ name = "semver" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +dependencies = [ + "serde", +] [[package]] name = "semver-parser" @@ -7031,9 +7295,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.159" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] @@ -7060,20 +7324,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.159" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] name = "serde_json" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -7088,7 +7352,7 @@ checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -7158,7 +7422,7 @@ checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7169,7 +7433,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7193,7 +7457,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7210,11 +7474,11 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.6" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -7248,7 +7512,17 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "signature" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +dependencies = [ + "digest 0.10.7", "rand_core 0.6.4", ] @@ -7261,7 +7535,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ 
-7393,7 +7667,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -7438,7 +7712,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -7516,12 +7790,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc8d618c6641ae355025c449427f9e96b98abf99a772be3cef6708d15c77147a" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" dependencies = [ "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -7553,7 +7827,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der", + "der 0.6.1", +] + +[[package]] +name = "spki" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +dependencies = [ + "base64ct", + "der 0.7.6", ] [[package]] @@ -7582,9 +7866,9 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8052a1004e979c0be24b9e55940195553103cc57d0b34f7e2c4e32793325e402" +checksum = "e43767964a80b2fdeda7a79a57a2b6cbca966688d5b81da8fe91140a94f552a1" dependencies = [ "arbitrary", "derivative", @@ -7796,9 +8080,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.13" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c9da457c5285ac1f936ebd076af6dac17a61cfe7826f2076b4d015cf47bc8ec" +checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" dependencies = [ "proc-macro2", "quote", @@ -7840,9 +8124,9 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.5.0" +version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags", "core-foundation", @@ -8002,7 +8286,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -8037,9 +8321,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ "itoa", "libc", @@ -8051,15 +8335,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -8130,9 +8414,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.27.0" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg 1.1.0", "bytes", @@ -8144,7 +8428,7 @@ dependencies = [ "signal-hook-registry", "socket2 0.4.9", "tokio-macros", - 
"windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -8159,13 +8443,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -8197,9 +8481,9 @@ dependencies = [ "pin-project-lite 0.2.9", "postgres-protocol", "postgres-types", - "socket2 0.5.1", + "socket2 0.5.3", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", ] [[package]] @@ -8225,15 +8509,25 @@ dependencies = [ ] [[package]] -name = "tokio-stream" -version = "0.1.12" +name = "tokio-rustls" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +dependencies = [ + "rustls 0.21.1", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", ] [[package]] @@ -8283,9 +8577,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -8368,20 +8662,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -8410,9 +8704,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", @@ -8428,9 +8722,9 @@ dependencies = [ [[package]] name = "trackable" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017e2a1a93718e4e8386d037cfb8add78f1d690467f4350fb582f55af1203167" +checksum = "b15bd114abb99ef8cee977e517c8f37aee63f184f2d08e3e6ceca092373369ae" dependencies = [ "trackable_derive", ] @@ -8645,6 +8939,7 @@ dependencies = [ "smallvec", "ssz_types", "state_processing", + "strum", "superstruct 0.6.0", "swap_or_not_shuffle", "tempfile", @@ -8727,9 +9022,9 @@ dependencies = [ [[package]] name = "universal-hash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ "crypto-common", "subtle", @@ -8793,17 +9088,17 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "serde", ] [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -8853,6 +9148,7 @@ dependencies = [ "task_executor", "tempfile", "tokio", + "tokio-stream", "tree_hash", "types", "url", @@ -9037,9 +9333,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -9047,24 +9343,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" dependencies = [ "cfg-if", "js-sys", @@ -9074,9 +9370,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = 
"0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9084,22 +9380,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" [[package]] name = "wasm-streams" @@ -9164,61 +9460,14 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" dependencies = [ "js-sys", "wasm-bindgen", ] -[[package]] -name = "web3" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" -dependencies = [ - "arrayvec", - "base64 0.13.1", - "bytes", - "derive_more", - "ethabi 16.0.0", - "ethereum-types 0.12.1", - "futures", - "futures-timer", - "headers", - "hex", - "idna 0.2.3", - "jsonrpc-core", - "log", - "once_cell", - "parking_lot 0.12.1", - "pin-project", - "reqwest", - "rlp", - "secp256k1", - "serde", - 
"serde_json", - "soketto", - "tiny-keccak", - "tokio", - "tokio-util 0.6.10", - "url", - "web3-async-native-tls", -] - -[[package]] -name = "web3-async-native-tls" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f6d8d1636b2627fe63518d5a9b38a569405d9c9bc665c43c9c341de57227ebb" -dependencies = [ - "native-tls", - "thiserror", - "tokio", - "url", -] - [[package]] name = "web3signer_tests" version = "0.1.0" @@ -9301,7 +9550,7 @@ dependencies = [ "sha2 0.10.6", "stun", "thiserror", - "time 0.3.20", + "time 0.3.21", "tokio", "turn", "url", @@ -9333,12 +9582,12 @@ dependencies = [ [[package]] name = "webrtc-dtls" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" +checksum = "c4a00f4242f2db33307347bd5be53263c52a0331c96c14292118c9a6bb48d267" dependencies = [ "aes 0.6.0", - "aes-gcm 0.10.1", + "aes-gcm 0.10.2", "async-trait", "bincode", "block-modes", @@ -9346,23 +9595,22 @@ dependencies = [ "ccm", "curve25519-dalek 3.2.0", "der-parser 8.2.0", - "elliptic-curve", + "elliptic-curve 0.12.3", "hkdf", "hmac 0.12.1", "log", - "oid-registry 0.6.1", "p256", "p384", "rand 0.8.5", "rand_core 0.6.4", - "rcgen 0.9.3", + "rcgen 0.10.0", "ring", "rustls 0.19.1", - "sec1", + "sec1 0.3.0", "serde", "sha1", "sha2 0.10.6", - "signature", + "signature 1.6.4", "subtle", "thiserror", "tokio", @@ -9390,7 +9638,7 @@ dependencies = [ "tokio", "turn", "url", - "uuid 1.3.0", + "uuid 1.3.3", "waitgroup", "webrtc-mdns", "webrtc-util", @@ -9411,18 +9659,15 @@ dependencies = [ [[package]] name = "webrtc-media" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a3c157a040324e5049bcbd644ffc9079e6738fa2cfab2bcff64e5cc4c00d7" +checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" dependencies = [ "byteorder", "bytes", - "derive_builder", 
- "displaydoc", "rand 0.8.5", "rtp", "thiserror", - "webrtc-util", ] [[package]] @@ -9562,11 +9807,11 @@ dependencies = [ [[package]] name = "windows" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdacb41e6a96a052c6cb63a144f24900236121c6f63f4f8219fef5977ecb0c25" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets", + "windows-targets 0.48.0", ] [[package]] @@ -9587,12 +9832,12 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", + "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm", + "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] @@ -9602,7 +9847,16 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", ] [[package]] @@ -9611,21 +9865,42 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", + "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm", + "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] +[[package]] +name = 
"windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.34.0" @@ -9638,6 +9913,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.34.0" @@ -9650,6 +9931,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.34.0" @@ -9662,6 +9949,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.34.0" @@ -9674,12 +9967,24 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.34.0" @@ -9692,6 +9997,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winreg" version = "0.10.1" @@ -9774,7 +10085,7 @@ dependencies = [ "ring", "rusticata-macros", "thiserror", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -9792,14 +10103,14 @@ dependencies = [ "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time 0.3.20", + "time 0.3.21", ] [[package]] name = "xml-rs" -version = "0.8.4" +version = "0.8.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +checksum = "1690519550bfa95525229b9ca2350c63043a4857b3b0013811b2ccf4a2420b01" [[package]] name = "xmltree" @@ -9835,11 +10146,11 @@ dependencies = [ [[package]] name = "yasna" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aed2e7a52e3744ab4d0c05c20aa065258e84c49fd4226f5191b2ed29712710b4" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -9859,7 +10170,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1c84d55287..45ad0cfd0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,7 @@ members = [ "common/lru_cache", "common/malloc_utils", "common/oneshot_broadcast", + "common/pretty_reqwest_error", "common/sensitive_url", "common/slot_clock", "common/system_health", diff --git a/Makefile b/Makefile index 89362d12d8..b833686e1b 100644 --- a/Makefile +++ b/Makefile @@ -145,8 +145,9 @@ test-op-pool-%: # Run the tests in the `slasher` crate for all supported database backends. test-slasher: - cargo test --release -p slasher --features mdbx - cargo test --release -p slasher --no-default-features --features lmdb + cargo test --release -p slasher --features lmdb + cargo test --release -p slasher --no-default-features --features mdbx + cargo test --release -p slasher --features lmdb,mdbx # both backends enabled # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: @@ -169,7 +170,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. 
By default, everything is allowed except for performance and correctness lints. lint: - cargo clippy --workspace --tests -- \ + cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) -- \ -D clippy::fn_to_numeric_cast_any \ -D warnings \ -A clippy::derive_partial_eq_without_eq \ @@ -179,6 +180,10 @@ lint: -A clippy::question-mark \ -A clippy::uninlined-format-args +# Lints the code using Clippy and automatically fix some simple compiler warnings. +lint-fix: + EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint + nightly-lint: cp .github/custom/clippy.toml . cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \ diff --git a/README.md b/README.md index 3565882d6e..ade3bc2aba 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ An open-source Ethereum consensus client, written in Rust and maintained by Sigm [Book Link]: https://lighthouse-book.sigmaprime.io [stable]: https://github.com/sigp/lighthouse/tree/stable [unstable]: https://github.com/sigp/lighthouse/tree/unstable -[blog]: https://lighthouse.sigmaprime.io +[blog]: https://lighthouse-blog.sigmaprime.io [Documentation](https://lighthouse-book.sigmaprime.io) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 95f145a557..7c74365418 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.1.0" +version = "4.3.0" authors = ["Paul Hauner ", "Age Manning { signed_aggregate: &'a SignedAggregateAndProof, indexed_attestation: IndexedAttestation, - attestation_root: Hash256, + attestation_data_root: Hash256, } /// Wraps a `Attestation` that has been verified up until the point that an `IndexedAttestation` can @@ -467,14 +467,17 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { } // Ensure the valid aggregated attestation has not already been seen locally. 
- let attestation_root = attestation.tree_hash_root(); + let attestation_data = &attestation.data; + let attestation_data_root = attestation_data.tree_hash_root(); + if chain .observed_attestations .write() - .is_known(attestation, attestation_root) + .is_known_subset(attestation, attestation_data_root) .map_err(|e| Error::BeaconChainError(e.into()))? { - return Err(Error::AttestationAlreadyKnown(attestation_root)); + metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); + return Err(Error::AttestationSupersetKnown(attestation_data_root)); } let aggregator_index = signed_aggregate.message.aggregator_index; @@ -520,7 +523,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { if attestation.aggregation_bits.is_zero() { Err(Error::EmptyAggregationBitfield) } else { - Ok(attestation_root) + Ok(attestation_data_root) } } @@ -533,7 +536,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { let attestation = &signed_aggregate.message.aggregate; let aggregator_index = signed_aggregate.message.aggregator_index; - let attestation_root = match Self::verify_early_checks(signed_aggregate, chain) { + let attestation_data_root = match Self::verify_early_checks(signed_aggregate, chain) { Ok(root) => root, Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)), }; @@ -568,7 +571,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { Ok(IndexedAggregatedAttestation { signed_aggregate, indexed_attestation, - attestation_root, + attestation_data_root, }) } } @@ -577,7 +580,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { /// Run the checks that happen after the indexed attestation and signature have been checked. 
fn verify_late_checks( signed_aggregate: &SignedAggregateAndProof, - attestation_root: Hash256, + attestation_data_root: Hash256, chain: &BeaconChain, ) -> Result<(), Error> { let attestation = &signed_aggregate.message.aggregate; @@ -587,13 +590,14 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { // // It's important to double check that the attestation is not already known, otherwise two // attestations processed at the same time could be published. - if let ObserveOutcome::AlreadyKnown = chain + if let ObserveOutcome::Subset = chain .observed_attestations .write() - .observe_item(attestation, Some(attestation_root)) + .observe_item(attestation, Some(attestation_data_root)) .map_err(|e| Error::BeaconChainError(e.into()))? { - return Err(Error::AttestationAlreadyKnown(attestation_root)); + metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); + return Err(Error::AttestationSupersetKnown(attestation_data_root)); } // Observe the aggregator so we don't process another aggregate from them. 
@@ -653,7 +657,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { let IndexedAggregatedAttestation { signed_aggregate, indexed_attestation, - attestation_root, + attestation_data_root, } = signed_aggregate; match check_signature { @@ -677,7 +681,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { CheckAttestationSignature::No => (), }; - if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_root, chain) { + if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_data_root, chain) { return Err(SignatureValid(indexed_attestation, e)); } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 70853998e3..01343ff3b1 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -63,7 +63,6 @@ use execution_layer::{ BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, }; -pub use fork_choice::CountUnrealized; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, @@ -555,7 +554,7 @@ impl BeaconChain { /// Persists `self.eth1_chain` and its caches to disk. 
pub fn persist_eth1_cache(&self) -> Result<(), Error> { - let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL); + let _timer = metrics::start_timer(&metrics::PERSIST_ETH1_CACHE); if let Some(eth1_chain) = self.eth1_chain.as_ref() { self.store @@ -2510,7 +2509,6 @@ impl BeaconChain { pub async fn process_chain_segment( self: &Arc, chain_segment: Vec>>, - count_unrealized: CountUnrealized, notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { let mut imported_blocks = 0; @@ -2579,8 +2577,8 @@ impl BeaconChain { .process_block( signature_verified_block.block_root(), signature_verified_block, - count_unrealized, notify_execution_layer, + || Ok(()), ) .await { @@ -2668,8 +2666,8 @@ impl BeaconChain { self: &Arc, block_root: Hash256, unverified_block: B, - count_unrealized: CountUnrealized, notify_execution_layer: NotifyExecutionLayer, + publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2688,8 +2686,9 @@ impl BeaconChain { &chain, notify_execution_layer, )?; + publish_fn()?; chain - .import_execution_pending_block(execution_pending, count_unrealized) + .import_execution_pending_block(execution_pending) .await }; @@ -2729,7 +2728,7 @@ impl BeaconChain { } // The block failed verification. Err(other) => { - trace!( + debug!( self.log, "Beacon block rejected"; "reason" => other.to_string(), @@ -2744,10 +2743,9 @@ impl BeaconChain { /// /// An error is returned if the block was unable to be imported. It may be partially imported /// (i.e., this function is not atomic). 
- async fn import_execution_pending_block( + pub async fn import_execution_pending_block( self: Arc, execution_pending_block: ExecutionPendingBlock, - count_unrealized: CountUnrealized, ) -> Result> { let ExecutionPendingBlock { block, @@ -2808,7 +2806,6 @@ impl BeaconChain { state, confirmed_state_roots, payload_verification_status, - count_unrealized, parent_block, parent_eth1_finalization_data, consensus_context, @@ -2834,7 +2831,6 @@ impl BeaconChain { mut state: BeaconState, confirmed_state_roots: Vec, payload_verification_status: PayloadVerificationStatus, - count_unrealized: CountUnrealized, parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, mut consensus_context: ConsensusContext, @@ -2902,8 +2898,9 @@ impl BeaconChain { block_delay, &state, payload_verification_status, + self.config.progressive_balances_mode, &self.spec, - count_unrealized, + &self.log, ) .map_err(|e| BlockError::BeaconChainError(e.into()))?; } @@ -5476,6 +5473,7 @@ impl BeaconChain { let shuffling_id = BlockShufflingIds { current: head_block.current_epoch_shuffling_id.clone(), next: head_block.next_epoch_shuffling_id.clone(), + previous: None, block_root: head_block.root, } .id_for_epoch(shuffling_epoch) @@ -5706,13 +5704,9 @@ impl BeaconChain { /// Since we are likely calling this during the slot we are going to propose in, don't take into /// account the current slot when accounting for skips. pub fn is_healthy(&self, parent_root: &Hash256) -> Result { + let cached_head = self.canonical_head.cached_head(); // Check if the merge has been finalized. - if let Some(finalized_hash) = self - .canonical_head - .cached_head() - .forkchoice_update_parameters() - .finalized_hash - { + if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash { if ExecutionBlockHash::zero() == finalized_hash { return Ok(ChainHealth::PreMerge); } @@ -5739,17 +5733,13 @@ impl BeaconChain { // Check slots at the head of the chain. 
let prev_slot = current_slot.saturating_sub(Slot::new(1)); - let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot()); + let head_skips = prev_slot.saturating_sub(cached_head.head_slot()); let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips; // Check if finalization is advancing. let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let epochs_since_finalization = current_epoch.saturating_sub( - self.canonical_head - .cached_head() - .finalized_checkpoint() - .epoch, - ); + let epochs_since_finalization = + current_epoch.saturating_sub(cached_head.finalized_checkpoint().epoch); let finalization_check = epochs_since_finalization.as_usize() <= self.config.builder_fallback_epochs_since_finalization; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ca4df864db..492f492521 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -52,6 +52,7 @@ use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; +use crate::observed_block_producers::SeenBlock; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -141,8 +142,6 @@ pub enum BlockError { /// It's unclear if this block is valid, but it cannot be processed without already knowing /// its parent. ParentUnknown(Arc>), - /// The block skips too many slots and is a DoS risk. - TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, /// The block slot is greater than the present slot. /// /// ## Peer scoring @@ -183,13 +182,6 @@ pub enum BlockError { /// /// The block is valid and we have already imported a block with this hash. 
BlockIsAlreadyKnown, - /// A block for this proposer and slot has already been observed. - /// - /// ## Peer scoring - /// - /// The `proposer` has already proposed a block at this slot. The existing block may or may not - /// be equal to the given block. - RepeatProposal { proposer: u64, slot: Slot }, /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. /// /// ## Peer scoring @@ -285,6 +277,13 @@ pub enum BlockError { /// problems to worry about than losing peers, and we're doing the network a favour by /// disconnecting. ParentExecutionPayloadInvalid { parent_root: Hash256 }, + /// The block is a slashable equivocation from the proposer. + /// + /// ## Peer scoring + /// + /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so + /// we penalise them with a mid-tolerance error. + Slashable, } /// Returned when block validation failed due to some issue verifying @@ -633,6 +632,40 @@ pub struct ExecutionPendingBlock { pub payload_verification_handle: PayloadVerificationHandle, } +pub trait IntoGossipVerifiedBlock: Sized { + fn into_gossip_verified_block( + self, + chain: &BeaconChain, + ) -> Result, BlockError>; + fn inner(&self) -> Arc>; +} + +impl IntoGossipVerifiedBlock for GossipVerifiedBlock { + fn into_gossip_verified_block( + self, + _chain: &BeaconChain, + ) -> Result, BlockError> { + Ok(self) + } + + fn inner(&self) -> Arc> { + self.block.clone() + } +} + +impl IntoGossipVerifiedBlock for Arc> { + fn into_gossip_verified_block( + self, + chain: &BeaconChain, + ) -> Result, BlockError> { + GossipVerifiedBlock::new(self, chain) + } + + fn inner(&self) -> Arc> { + self.clone() + } +} + /// Implemented on types that can be converted into a `ExecutionPendingBlock`. /// /// Used to allow functions to accept blocks at various stages of verification. 
@@ -729,25 +762,12 @@ impl GossipVerifiedBlock { return Err(BlockError::BlockIsAlreadyKnown); } - // Check that we have not already received a block with a valid signature for this slot. - if chain - .observed_block_producers - .read() - .proposer_has_been_observed(block.message()) - .map_err(|e| BlockError::BeaconChainError(e.into()))? - { - return Err(BlockError::RepeatProposal { - proposer: block.message().proposer_index(), - slot: block.slot(), - }); - } - // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. check_block_is_finalized_checkpoint_or_descendant( chain, - &chain.canonical_head.fork_choice_write_lock(), + &chain.canonical_head.fork_choice_read_lock(), &block, )?; @@ -786,9 +806,6 @@ impl GossipVerifiedBlock { parent_block.root }; - // Reject any block that exceeds our limit on skipped slots. - check_block_skip_slots(chain, parent_block.slot, block.message())?; - // We assign to a variable instead of using `if let Some` directly to ensure we drop the // write lock before trying to acquire it again in the `else` clause. let proposer_opt = chain @@ -860,17 +877,16 @@ impl GossipVerifiedBlock { // // It's important to double-check that the proposer still hasn't been observed so we don't // have a race-condition when verifying two blocks simultaneously. - if chain + match chain .observed_block_producers .write() - .observe_proposer(block.message()) + .observe_proposal(block_root, block.message()) .map_err(|e| BlockError::BeaconChainError(e.into()))? 
{ - return Err(BlockError::RepeatProposal { - proposer: block.message().proposer_index(), - slot: block.slot(), - }); - } + SeenBlock::Slashable => return Err(BlockError::Slashable), + SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown), + SeenBlock::UniqueNonSlashable => {} + }; if block.message().proposer_index() != expected_proposer as u64 { return Err(BlockError::IncorrectBlockProposer { @@ -942,9 +958,6 @@ impl SignatureVerifiedBlock { let (mut parent, block) = load_parent(block_root, block, chain)?; - // Reject any block that exceeds our limit on skipped slots. - check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; - let state = cheap_state_advance_to_obtain_committees( &mut parent.pre_state, parent.beacon_state_root, @@ -1109,6 +1122,12 @@ impl ExecutionPendingBlock { chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, ) -> Result> { + chain + .observed_block_producers + .write() + .observe_proposal(block_root, block.message()) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(parent) = chain .canonical_head .fork_choice_read_lock() @@ -1135,9 +1154,6 @@ impl ExecutionPendingBlock { return Err(BlockError::ParentUnknown(block)); } - // Reject any block that exceeds our limit on skipped slots. - check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; - /* * Perform cursory checks to see if the block is even worth processing. */ @@ -1492,30 +1508,6 @@ impl ExecutionPendingBlock { } } -/// Check that the count of skip slots between the block and its parent does not exceed our maximum -/// value. -/// -/// Whilst this is not part of the specification, we include this to help prevent us from DoS -/// attacks. In times of dire network circumstance, the user can configure the -/// `import_max_skip_slots` value. 
-fn check_block_skip_slots( - chain: &BeaconChain, - parent_slot: Slot, - block: BeaconBlockRef<'_, T::EthSpec>, -) -> Result<(), BlockError> { - // Reject any block that exceeds our limit on skipped slots. - if let Some(max_skip_slots) = chain.config.import_max_skip_slots { - if block.slot() > parent_slot + max_skip_slots { - return Err(BlockError::TooManySkippedSlots { - parent_slot, - block_slot: block.slot(), - }); - } - } - - Ok(()) -} - /// Returns `Ok(())` if the block's slot is greater than the anchor block's slot (if any). fn check_block_against_anchor_slot( block: BeaconBlockRef<'_, T::EthSpec>, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ca377635d6..044391c415 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -6,7 +6,7 @@ use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_bound use crate::head_tracker::HeadTracker; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; -use crate::shuffling_cache::ShufflingCache; +use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::ValidatorMonitor; @@ -18,7 +18,7 @@ use crate::{ }; use eth1::Config as Eth1Config; use execution_layer::ExecutionLayer; -use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses}; +use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; @@ -338,7 +338,7 @@ where let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; beacon_state - .build_all_caches(&self.spec) + .build_caches(&self.spec) .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?; let beacon_state_root = 
beacon_block.message().state_root(); @@ -437,7 +437,7 @@ where // Prime all caches before storing the state in the database and computing the tree hash // root. weak_subj_state - .build_all_caches(&self.spec) + .build_caches(&self.spec) .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; let computed_state_root = weak_subj_state @@ -687,10 +687,13 @@ where store.clone(), Some(current_slot), &self.spec, - CountUnrealized::True, + self.chain_config.progressive_balances_mode, + &log, )?; } + let head_shuffling_ids = BlockShufflingIds::try_from_head(head_block_root, &head_state)?; + let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, beacon_block: Arc::new(head_block), @@ -699,7 +702,7 @@ where head_snapshot .beacon_state - .build_all_caches(&self.spec) + .build_caches(&self.spec) .map_err(|e| format!("Failed to build state caches: {:?}", e))?; // Perform a check to ensure that the finalization points of the head and fork choice are @@ -825,7 +828,6 @@ where observed_sync_aggregators: <_>::default(), // TODO: allow for persisting and loading the pool from disk. observed_block_producers: <_>::default(), - // TODO: allow for persisting and loading the pool from disk. 
observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), @@ -847,7 +849,11 @@ where DEFAULT_SNAPSHOT_CACHE_SIZE, head_for_snapshot_cache, )), - shuffling_cache: TimeoutRwLock::new(ShufflingCache::new(shuffling_cache_size)), + shuffling_cache: TimeoutRwLock::new(ShufflingCache::new( + shuffling_cache_size, + head_shuffling_ids, + log.clone(), + )), eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), beacon_proposer_cache: <_>::default(), block_times_cache: <_>::default(), diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 0e1c8a5305..2b1f714362 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -31,7 +31,9 @@ //! the head block root. This is unacceptable for fast-responding functions like the networking //! stack. +use crate::beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT; use crate::persisted_fork_choice::PersistedForkChoice; +use crate::shuffling_cache::BlockShufflingIds; use crate::{ beacon_chain::{ BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, @@ -846,6 +848,35 @@ impl BeaconChain { ); }); + match BlockShufflingIds::try_from_head( + new_snapshot.beacon_block_root, + &new_snapshot.beacon_state, + ) { + Ok(head_shuffling_ids) => { + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .map(|mut shuffling_cache| { + shuffling_cache.update_head_shuffling_ids(head_shuffling_ids) + }) + .unwrap_or_else(|| { + error!( + self.log, + "Failed to obtain cache write lock"; + "lock" => "shuffling_cache", + "task" => "update head shuffling decision root" + ); + }); + } + Err(e) => { + error!( + self.log, + "Failed to get head shuffling ids"; + "error" => ?e, + "head_block_root" => ?new_snapshot.beacon_block_root + ); + } + } + observe_head_block_delays( &mut self.block_times_cache.write(), &new_head_proto_block, 
diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index a74fdced1f..cc7a957ecc 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,7 +1,7 @@ pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde_derive::{Deserialize, Serialize}; use std::time::Duration; -use types::{Checkpoint, Epoch}; +use types::{Checkpoint, Epoch, ProgressiveBalancesMode}; pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); @@ -17,8 +17,7 @@ pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24; #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { - /// Maximum number of slots to skip when importing a consensus message (e.g., block, - /// attestation, etc). + /// Maximum number of slots to skip when importing an attestation. /// /// If `None`, there is no limit. pub import_max_skip_slots: Option, @@ -82,6 +81,8 @@ pub struct ChainConfig { pub always_prepare_payload: bool, /// Whether backfill sync processing should be rate-limited. pub enable_backfill_rate_limiting: bool, + /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation. 
+ pub progressive_balances_mode: ProgressiveBalancesMode, } impl Default for ChainConfig { @@ -112,6 +113,7 @@ impl Default for ChainConfig { genesis_backfill: false, always_prepare_payload: false, enable_backfill_rate_limiting: true, + progressive_balances_mode: ProgressiveBalancesMode::Checked, } } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index e789b54a21..50bcf42653 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -213,6 +213,7 @@ pub enum BeaconChainError { BlsToExecutionConflictsWithPool, InconsistentFork(InconsistentFork), ProposerHeadForkChoiceError(fork_choice::Error), + UnableToPublish, } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index ccd17af243..dc0e34277c 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,5 +1,5 @@ use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus}; +use fork_choice::{ForkChoice, PayloadVerificationStatus}; use itertools::process_results; use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; @@ -10,7 +10,10 @@ use state_processing::{ use std::sync::Arc; use std::time::Duration; use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore}; -use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; +use types::{ + BeaconState, ChainSpec, EthSpec, ForkName, Hash256, ProgressiveBalancesMode, SignedBeaconBlock, + Slot, +}; const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. 
Check its file permissions or \ consider deleting it by running with the --purge-db flag."; @@ -100,7 +103,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It store: Arc>, current_slot: Option, spec: &ChainSpec, - count_unrealized_config: CountUnrealized, + progressive_balances_mode: ProgressiveBalancesMode, + log: &Logger, ) -> Result, E>, String> { // Fetch finalized block. let finalized_checkpoint = head_state.finalized_checkpoint(); @@ -166,8 +170,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; let mut state = finalized_snapshot.beacon_state; - let blocks_len = blocks.len(); - for (i, block) in blocks.into_iter().enumerate() { + for block in blocks { complete_state_advance(&mut state, None, block.slot(), spec) .map_err(|e| format!("State advance failed: {:?}", e))?; @@ -190,15 +193,6 @@ pub fn reset_fork_choice_to_finalization, Cold: It // This scenario is so rare that it seems OK to double-verify some blocks. let payload_verification_status = PayloadVerificationStatus::Optimistic; - // Because we are replaying a single chain of blocks, we only need to calculate unrealized - // justification for the last block in the chain. 
- let is_last_block = i + 1 == blocks_len; - let count_unrealized = if is_last_block { - count_unrealized_config - } else { - CountUnrealized::False - }; - fork_choice .on_block( block.slot(), @@ -208,8 +202,9 @@ pub fn reset_fork_choice_to_finalization, Cold: It Duration::from_secs(0), &state, payload_verification_status, + progressive_balances_mode, spec, - count_unrealized, + log, ) .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index be1522a3b8..c5cf74e179 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -52,8 +52,8 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - CountUnrealized, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, - StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, + WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; @@ -64,6 +64,7 @@ pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{ get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock, + IntoExecutionPendingBlock, IntoGossipVerifiedBlock, }; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b52c4258fe..dff663ded0 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ 
b/beacon_node/beacon_chain/src/metrics.rs @@ -874,6 +874,14 @@ lazy_static! { "beacon_sync_committee_message_gossip_verification_seconds", "Full runtime of sync contribution gossip verification" ); + pub static ref SYNC_MESSAGE_EQUIVOCATIONS: Result = try_create_int_counter( + "sync_message_equivocations_total", + "Number of sync messages with the same validator index for different blocks" + ); + pub static ref SYNC_MESSAGE_EQUIVOCATIONS_TO_HEAD: Result = try_create_int_counter( + "sync_message_equivocations_to_head_total", + "Number of sync message which conflict with a previous message but elect the head" + ); /* * Sync Committee Contribution Verification @@ -990,6 +998,17 @@ lazy_static! { "light_client_optimistic_update_verification_success_total", "Number of light client optimistic updates verified for gossip" ); + /* + * Aggregate subset metrics + */ + pub static ref SYNC_CONTRIBUTION_SUBSETS: Result = try_create_int_counter( + "beacon_sync_contribution_subsets_total", + "Count of new sync contributions that are subsets of already known aggregates" + ); + pub static ref AGGREGATED_ATTESTATION_SUBSETS: Result = try_create_int_counter( + "beacon_aggregated_attestation_subsets_total", + "Count of new aggregated attestations that are subsets of already known aggregates" + ); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index bb0132f5fe..18a761e29e 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -1,7 +1,9 @@ //! Provides an `ObservedAggregates` struct which allows us to reject aggregated attestations or //! sync committee contributions if we've already seen them. 
-use std::collections::HashSet; +use crate::sync_committee_verification::SyncCommitteeData; +use ssz_types::{BitList, BitVector}; +use std::collections::HashMap; use std::marker::PhantomData; use tree_hash::TreeHash; use types::consts::altair::{ @@ -10,8 +12,16 @@ use types::consts::altair::{ use types::slot_data::SlotData; use types::{Attestation, EthSpec, Hash256, Slot, SyncCommitteeContribution}; -pub type ObservedSyncContributions = ObservedAggregates, E>; -pub type ObservedAggregateAttestations = ObservedAggregates, E>; +pub type ObservedSyncContributions = ObservedAggregates< + SyncCommitteeContribution, + E, + BitVector<::SyncSubcommitteeSize>, +>; +pub type ObservedAggregateAttestations = ObservedAggregates< + Attestation, + E, + BitList<::MaxValidatorsPerCommittee>, +>; /// A trait use to associate capacity constants with the type being stored in `ObservedAggregates`. pub trait Consts { @@ -69,10 +79,81 @@ impl Consts for SyncCommitteeContribution { } } +/// A trait for types that implement a behaviour where one object of that type +/// can be a subset/superset of another. +/// This trait allows us to be generic over the aggregate item that we store in the cache that +/// we want to prevent duplicates/subsets for. +pub trait SubsetItem { + /// The item that is stored for later comparison with new incoming aggregate items. + type Item; + + /// Returns `true` if `self` is a non-strict subset of `other` and `false` otherwise. + fn is_subset(&self, other: &Self::Item) -> bool; + + /// Returns `true` if `self` is a non-strict superset of `other` and `false` otherwise. + fn is_superset(&self, other: &Self::Item) -> bool; + + /// Returns the item that gets stored in `ObservedAggregates` for later subset + /// comparison with incoming aggregates. + fn get_item(&self) -> Self::Item; + + /// Returns a unique value that keys the object to the item that is being stored + /// in `ObservedAggregates`. 
+ fn root(&self) -> Hash256; +} + +impl SubsetItem for Attestation { + type Item = BitList; + fn is_subset(&self, other: &Self::Item) -> bool { + self.aggregation_bits.is_subset(other) + } + + fn is_superset(&self, other: &Self::Item) -> bool { + other.is_subset(&self.aggregation_bits) + } + + /// Returns the sync contribution aggregation bits. + fn get_item(&self) -> Self::Item { + self.aggregation_bits.clone() + } + + /// Returns the hash tree root of the attestation data. + fn root(&self) -> Hash256 { + self.data.tree_hash_root() + } +} + +impl SubsetItem for SyncCommitteeContribution { + type Item = BitVector; + fn is_subset(&self, other: &Self::Item) -> bool { + self.aggregation_bits.is_subset(other) + } + + fn is_superset(&self, other: &Self::Item) -> bool { + other.is_subset(&self.aggregation_bits) + } + + /// Returns the sync contribution aggregation bits. + fn get_item(&self) -> Self::Item { + self.aggregation_bits.clone() + } + + /// Returns the hash tree root of the root, slot and subcommittee index + /// of the sync contribution. + fn root(&self) -> Hash256 { + SyncCommitteeData { + root: self.beacon_block_root, + slot: self.slot, + subcommittee_index: self.subcommittee_index, + } + .tree_hash_root() + } +} + #[derive(Debug, PartialEq)] pub enum ObserveOutcome { - /// This item was already known. - AlreadyKnown, + /// This item is a non-strict subset of an already known item. + Subset, /// This was the first time this item was observed. New, } @@ -94,26 +175,28 @@ pub enum Error { }, } -/// A `HashSet` that contains entries related to some `Slot`. -struct SlotHashSet { - set: HashSet, +/// A `HashMap` that contains entries related to some `Slot`. +struct SlotHashSet { + /// Contains a vector of maximally-sized aggregation bitfields/bitvectors + /// such that no bitfield/bitvector is a subset of any other in the list. 
+ map: HashMap>, slot: Slot, max_capacity: usize, } -impl SlotHashSet { +impl SlotHashSet { pub fn new(slot: Slot, initial_capacity: usize, max_capacity: usize) -> Self { Self { slot, - set: HashSet::with_capacity(initial_capacity), + map: HashMap::with_capacity(initial_capacity), max_capacity, } } /// Store the items in self so future observations recognise its existence. - pub fn observe_item( + pub fn observe_item>( &mut self, - item: &T, + item: &S, root: Hash256, ) -> Result { if item.get_slot() != self.slot { @@ -123,29 +206,45 @@ impl SlotHashSet { }); } - if self.set.contains(&root) { - Ok(ObserveOutcome::AlreadyKnown) - } else { - // Here we check to see if this slot has reached the maximum observation count. - // - // The resulting behaviour is that we are no longer able to successfully observe new - // items, however we will continue to return `is_known` values. We could also - // disable `is_known`, however then we would stop forwarding items across the - // gossip network and I think that this is a worse case than sending some invalid ones. - // The underlying libp2p network is responsible for removing duplicate messages, so - // this doesn't risk a broadcast loop. - if self.set.len() >= self.max_capacity { - return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity)); + if let Some(aggregates) = self.map.get_mut(&root) { + for existing in aggregates { + // Check if `item` is a subset of any of the observed aggregates + if item.is_subset(existing) { + return Ok(ObserveOutcome::Subset); + // Check if `item` is a superset of any of the observed aggregates + // If true, we replace the new item with its existing subset. This allows us + // to hold fewer items in the list. + } else if item.is_superset(existing) { + *existing = item.get_item(); + return Ok(ObserveOutcome::New); + } } - - self.set.insert(root); - - Ok(ObserveOutcome::New) } + + // Here we check to see if this slot has reached the maximum observation count. 
+ // + // The resulting behaviour is that we are no longer able to successfully observe new + // items, however we will continue to return `is_known_subset` values. We could also + // disable `is_known_subset`, however then we would stop forwarding items across the + // gossip network and I think that this is a worse case than sending some invalid ones. + // The underlying libp2p network is responsible for removing duplicate messages, so + // this doesn't risk a broadcast loop. + if self.map.len() >= self.max_capacity { + return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity)); + } + + let item = item.get_item(); + self.map.entry(root).or_default().push(item); + Ok(ObserveOutcome::New) } - /// Indicates if `item` has been observed before. - pub fn is_known(&self, item: &T, root: Hash256) -> Result { + /// Check if `item` is a non-strict subset of any of the already observed aggregates for + /// the given root and slot. + pub fn is_known_subset>( + &self, + item: &S, + root: Hash256, + ) -> Result { if item.get_slot() != self.slot { return Err(Error::IncorrectSlot { expected: self.slot, @@ -153,25 +252,28 @@ impl SlotHashSet { }); } - Ok(self.set.contains(&root)) + Ok(self + .map + .get(&root) + .map_or(false, |agg| agg.iter().any(|val| item.is_subset(val)))) } /// The number of observed items in `self`. pub fn len(&self) -> usize { - self.set.len() + self.map.len() } } /// Stores the roots of objects for some number of `Slots`, so we can determine if /// these have previously been seen on the network. 
-pub struct ObservedAggregates { +pub struct ObservedAggregates { lowest_permissible_slot: Slot, - sets: Vec, + sets: Vec>, _phantom_spec: PhantomData, _phantom_tree_hash: PhantomData, } -impl Default for ObservedAggregates { +impl Default for ObservedAggregates { fn default() -> Self { Self { lowest_permissible_slot: Slot::new(0), @@ -182,17 +284,17 @@ impl Default for ObservedAggregates } } -impl ObservedAggregates { - /// Store the root of `item` in `self`. +impl, E: EthSpec, I> ObservedAggregates { + /// Store `item` in `self` keyed at `root`. /// - /// `root` must equal `item.tree_hash_root()`. + /// `root` must equal `item.root::()`. pub fn observe_item( &mut self, item: &T, root_opt: Option, ) -> Result { let index = self.get_set_index(item.get_slot())?; - let root = root_opt.unwrap_or_else(|| item.tree_hash_root()); + let root = root_opt.unwrap_or_else(|| item.root()); self.sets .get_mut(index) @@ -200,17 +302,18 @@ impl ObservedAggregates { .and_then(|set| set.observe_item(item, root)) } - /// Check to see if the `root` of `item` is in self. + /// Check if `item` is a non-strict subset of any of the already observed aggregates for + /// the given root and slot. /// - /// `root` must equal `a.tree_hash_root()`. + /// `root` must equal `item.root::()`. #[allow(clippy::wrong_self_convention)] - pub fn is_known(&mut self, item: &T, root: Hash256) -> Result { + pub fn is_known_subset(&mut self, item: &T, root: Hash256) -> Result { let index = self.get_set_index(item.get_slot())?; self.sets .get(index) .ok_or(Error::InvalidSetIndex(index)) - .and_then(|set| set.is_known(item, root)) + .and_then(|set| set.is_known_subset(item, root)) } /// The maximum number of slots that items are stored for. 
@@ -296,7 +399,6 @@ impl ObservedAggregates { #[cfg(not(debug_assertions))] mod tests { use super::*; - use tree_hash::TreeHash; use types::{test_utils::test_random_instance, Hash256}; type E = types::MainnetEthSpec; @@ -330,7 +432,7 @@ mod tests { for a in &items { assert_eq!( - store.is_known(a, a.tree_hash_root()), + store.is_known_subset(a, a.root()), Ok(false), "should indicate an unknown attestation is unknown" ); @@ -343,13 +445,13 @@ mod tests { for a in &items { assert_eq!( - store.is_known(a, a.tree_hash_root()), + store.is_known_subset(a, a.root()), Ok(true), "should indicate a known attestation is known" ); assert_eq!( - store.observe_item(a, Some(a.tree_hash_root())), - Ok(ObserveOutcome::AlreadyKnown), + store.observe_item(a, Some(a.root())), + Ok(ObserveOutcome::Subset), "should acknowledge an existing attestation" ); } diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index ed22beaec6..59c67bd1b9 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -20,7 +20,7 @@ use std::collections::{HashMap, HashSet}; use std::hash::Hash; use std::marker::PhantomData; use types::slot_data::SlotData; -use types::{Epoch, EthSpec, Slot, Unsigned}; +use types::{Epoch, EthSpec, Hash256, Slot, Unsigned}; /// The maximum capacity of the `AutoPruningEpochContainer`. /// @@ -39,10 +39,10 @@ pub const MAX_CACHED_EPOCHS: u64 = 3; pub type ObservedAttesters = AutoPruningEpochContainer; pub type ObservedSyncContributors = - AutoPruningSlotContainer, E>; + AutoPruningSlotContainer, E>; pub type ObservedAggregators = AutoPruningEpochContainer; pub type ObservedSyncAggregators = - AutoPruningSlotContainer; + AutoPruningSlotContainer; #[derive(Debug, PartialEq)] pub enum Error { @@ -62,7 +62,7 @@ pub enum Error { } /// Implemented on an item in an `AutoPruningContainer`. 
-pub trait Item { +pub trait Item { /// Instantiate `Self` with the given `capacity`. fn with_capacity(capacity: usize) -> Self; @@ -75,11 +75,11 @@ pub trait Item { /// Returns the number of validators that have been observed by `self`. fn validator_count(&self) -> usize; - /// Store `validator_index` in `self`. - fn insert(&mut self, validator_index: usize) -> bool; + /// Store `validator_index` and `value` in `self`. + fn insert(&mut self, validator_index: usize, value: T) -> bool; - /// Returns `true` if `validator_index` has been stored in `self`. - fn contains(&self, validator_index: usize) -> bool; + /// Returns `Some(T)` if there is an entry for `validator_index`. + fn get(&self, validator_index: usize) -> Option; } /// Stores a `BitVec` that represents which validator indices have attested or sent sync committee @@ -88,7 +88,7 @@ pub struct EpochBitfield { bitfield: BitVec, } -impl Item for EpochBitfield { +impl Item<()> for EpochBitfield { fn with_capacity(capacity: usize) -> Self { Self { bitfield: BitVec::with_capacity(capacity), @@ -108,7 +108,7 @@ impl Item for EpochBitfield { self.bitfield.iter().filter(|bit| **bit).count() } - fn insert(&mut self, validator_index: usize) -> bool { + fn insert(&mut self, validator_index: usize, _value: ()) -> bool { self.bitfield .get_mut(validator_index) .map(|mut bit| { @@ -129,8 +129,11 @@ impl Item for EpochBitfield { }) } - fn contains(&self, validator_index: usize) -> bool { - self.bitfield.get(validator_index).map_or(false, |bit| *bit) + fn get(&self, validator_index: usize) -> Option<()> { + self.bitfield + .get(validator_index) + .map_or(false, |bit| *bit) + .then_some(()) } } @@ -140,7 +143,7 @@ pub struct EpochHashSet { set: HashSet, } -impl Item for EpochHashSet { +impl Item<()> for EpochHashSet { fn with_capacity(capacity: usize) -> Self { Self { set: HashSet::with_capacity(capacity), @@ -163,27 +166,27 @@ impl Item for EpochHashSet { /// Inserts the `validator_index` in the set. 
Returns `true` if the `validator_index` was /// already in the set. - fn insert(&mut self, validator_index: usize) -> bool { + fn insert(&mut self, validator_index: usize, _value: ()) -> bool { !self.set.insert(validator_index) } /// Returns `true` if the `validator_index` is in the set. - fn contains(&self, validator_index: usize) -> bool { - self.set.contains(&validator_index) + fn get(&self, validator_index: usize) -> Option<()> { + self.set.contains(&validator_index).then_some(()) } } /// Stores a `HashSet` of which validator indices have created a sync aggregate during a /// slot. pub struct SyncContributorSlotHashSet { - set: HashSet, + map: HashMap, phantom: PhantomData, } -impl Item for SyncContributorSlotHashSet { +impl Item for SyncContributorSlotHashSet { fn with_capacity(capacity: usize) -> Self { Self { - set: HashSet::with_capacity(capacity), + map: HashMap::with_capacity(capacity), phantom: PhantomData, } } @@ -194,22 +197,24 @@ impl Item for SyncContributorSlotHashSet { } fn len(&self) -> usize { - self.set.len() + self.map.len() } fn validator_count(&self) -> usize { - self.set.len() + self.map.len() } /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was /// already in the set. - fn insert(&mut self, validator_index: usize) -> bool { - !self.set.insert(validator_index) + fn insert(&mut self, validator_index: usize, beacon_block_root: Hash256) -> bool { + self.map + .insert(validator_index, beacon_block_root) + .is_some() } /// Returns `true` if the `validator_index` is in the set. 
- fn contains(&self, validator_index: usize) -> bool { - self.set.contains(&validator_index) + fn get(&self, validator_index: usize) -> Option { + self.map.get(&validator_index).copied() } } @@ -219,7 +224,7 @@ pub struct SyncAggregatorSlotHashSet { set: HashSet, } -impl Item for SyncAggregatorSlotHashSet { +impl Item<()> for SyncAggregatorSlotHashSet { fn with_capacity(capacity: usize) -> Self { Self { set: HashSet::with_capacity(capacity), @@ -241,13 +246,13 @@ impl Item for SyncAggregatorSlotHashSet { /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was /// already in the set. - fn insert(&mut self, validator_index: usize) -> bool { + fn insert(&mut self, validator_index: usize, _value: ()) -> bool { !self.set.insert(validator_index) } /// Returns `true` if the `validator_index` is in the set. - fn contains(&self, validator_index: usize) -> bool { - self.set.contains(&validator_index) + fn get(&self, validator_index: usize) -> Option<()> { + self.set.contains(&validator_index).then_some(()) } } @@ -275,7 +280,7 @@ impl Default for AutoPruningEpochContainer { } } -impl AutoPruningEpochContainer { +impl, E: EthSpec> AutoPruningEpochContainer { /// Observe that `validator_index` has produced attestation `a`. Returns `Ok(true)` if `a` has /// previously been observed for `validator_index`. /// @@ -293,7 +298,7 @@ impl AutoPruningEpochContainer { self.prune(epoch); if let Some(item) = self.items.get_mut(&epoch) { - Ok(item.insert(validator_index)) + Ok(item.insert(validator_index, ())) } else { // To avoid re-allocations, try and determine a rough initial capacity for the new item // by obtaining the mean size of all items in earlier epoch. 
@@ -309,7 +314,7 @@ impl AutoPruningEpochContainer { let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity); let mut item = T::with_capacity(initial_capacity); - item.insert(validator_index); + item.insert(validator_index, ()); self.items.insert(epoch, item); Ok(false) @@ -333,7 +338,7 @@ impl AutoPruningEpochContainer { let exists = self .items .get(&epoch) - .map_or(false, |item| item.contains(validator_index)); + .map_or(false, |item| item.get(validator_index).is_some()); Ok(exists) } @@ -392,7 +397,7 @@ impl AutoPruningEpochContainer { pub fn index_seen_at_epoch(&self, index: usize, epoch: Epoch) -> bool { self.items .get(&epoch) - .map(|item| item.contains(index)) + .map(|item| item.get(index).is_some()) .unwrap_or(false) } } @@ -405,23 +410,63 @@ impl AutoPruningEpochContainer { /// sync contributions with an epoch prior to `data.slot - 3` will be cleared from the cache. /// /// `V` should be set to a `SyncAggregatorSlotHashSet` or a `SyncContributorSlotHashSet`. -pub struct AutoPruningSlotContainer { +pub struct AutoPruningSlotContainer { lowest_permissible_slot: Slot, items: HashMap, - _phantom: PhantomData, + _phantom_e: PhantomData, + _phantom_s: PhantomData, } -impl Default for AutoPruningSlotContainer { +impl Default for AutoPruningSlotContainer { fn default() -> Self { Self { lowest_permissible_slot: Slot::new(0), items: HashMap::new(), - _phantom: PhantomData, + _phantom_e: PhantomData, + _phantom_s: PhantomData, } } } -impl AutoPruningSlotContainer { +impl, E: EthSpec> + AutoPruningSlotContainer +{ + /// Observes the given `value` for the given `validator_index`. + /// + /// The `override_observation` function is supplied `previous_observation` + /// and `value`. If it returns `true`, then any existing observation will be + /// overridden. + /// + /// This function returns `Some` if: + /// - An observation already existed for the validator, AND, + /// - The `override_observation` function returned `false`. 
+ /// + /// Alternatively, it returns `None` if: + /// - An observation did not already exist for the given validator, OR, + /// - The `override_observation` function returned `true`. + pub fn observe_validator_with_override( + &mut self, + key: K, + validator_index: usize, + value: S, + override_observation: F, + ) -> Result, Error> + where + F: Fn(&S, &S) -> bool, + { + if let Some(prev_observation) = self.observation_for_validator(key, validator_index)? { + if override_observation(&prev_observation, &value) { + self.observe_validator(key, validator_index, value)?; + Ok(None) + } else { + Ok(Some(prev_observation)) + } + } else { + self.observe_validator(key, validator_index, value)?; + Ok(None) + } + } + /// Observe that `validator_index` has produced a sync committee message. Returns `Ok(true)` if /// the sync committee message has previously been observed for `validator_index`. /// @@ -429,14 +474,19 @@ impl AutoPruningSlotContainer Result { + pub fn observe_validator( + &mut self, + key: K, + validator_index: usize, + value: S, + ) -> Result { let slot = key.get_slot(); self.sanitize_request(slot, validator_index)?; self.prune(slot); if let Some(item) = self.items.get_mut(&key) { - Ok(item.insert(validator_index)) + Ok(item.insert(validator_index, value)) } else { // To avoid re-allocations, try and determine a rough initial capacity for the new item // by obtaining the mean size of all items in earlier slot. @@ -452,32 +502,45 @@ impl AutoPruningSlotContainer Result { + self.observation_for_validator(key, validator_index) + .map(|observation| observation.is_some()) + } + + /// Returns `Ok(Some)` if the `validator_index` has already produced a + /// conflicting sync committee message. + /// + /// ## Errors + /// + /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`. + /// - `key.slot` is earlier than `self.lowest_permissible_slot`. 
+ pub fn observation_for_validator( + &self, + key: K, + validator_index: usize, + ) -> Result, Error> { self.sanitize_request(key.get_slot(), validator_index)?; - let exists = self + let observation = self .items .get(&key) - .map_or(false, |item| item.contains(validator_index)); + .and_then(|item| item.get(validator_index)); - Ok(exists) + Ok(observation) } /// Returns the number of validators that have been observed at the given `slot`. Returns @@ -561,6 +624,116 @@ mod tests { type E = types::MainnetEthSpec; + #[test] + fn value_storage() { + type Container = AutoPruningSlotContainer, E>; + + let mut store: Container = <_>::default(); + let key = Slot::new(0); + let validator_index = 0; + let value = Hash256::zero(); + + // Assert there is no entry. + assert!(store + .observation_for_validator(key, validator_index) + .unwrap() + .is_none()); + assert!(!store + .validator_has_been_observed(key, validator_index) + .unwrap()); + + // Add an entry. + assert!(!store + .observe_validator(key, validator_index, value) + .unwrap()); + + // Assert there is a correct entry. + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + Some(value) + ); + assert!(store + .validator_has_been_observed(key, validator_index) + .unwrap()); + + let alternate_value = Hash256::from_low_u64_be(1); + + // Assert that override false does not override. + assert_eq!( + store + .observe_validator_with_override(key, validator_index, alternate_value, |_, _| { + false + }) + .unwrap(), + Some(value) + ); + + // Assert that override true overrides and acts as if there was never an + // entry there. + assert_eq!( + store + .observe_validator_with_override(key, validator_index, alternate_value, |_, _| { + true + }) + .unwrap(), + None + ); + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + Some(alternate_value) + ); + + // Reset the store. 
+ let mut store: Container = <_>::default(); + + // Asset that a new entry with override = false is inserted + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + None + ); + assert_eq!( + store + .observe_validator_with_override(key, validator_index, value, |_, _| { false }) + .unwrap(), + None, + ); + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + Some(value) + ); + + // Reset the store. + let mut store: Container = <_>::default(); + + // Asset that a new entry with override = true is inserted + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + None + ); + assert_eq!( + store + .observe_validator_with_override(key, validator_index, value, |_, _| { true }) + .unwrap(), + None, + ); + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + Some(value) + ); + } + macro_rules! test_suite_epoch { ($mod_name: ident, $type: ident) => { #[cfg(test)] @@ -722,7 +895,7 @@ mod tests { test_suite_epoch!(observed_aggregators, ObservedAggregators); macro_rules! 
test_suite_slot { - ($mod_name: ident, $type: ident) => { + ($mod_name: ident, $type: ident, $value: expr) => { #[cfg(test)] mod $mod_name { use super::*; @@ -737,7 +910,7 @@ mod tests { "should indicate an unknown item is unknown" ); assert_eq!( - store.observe_validator(key, i), + store.observe_validator(key, i, $value), Ok(false), "should observe new item" ); @@ -750,7 +923,7 @@ mod tests { "should indicate a known item is known" ); assert_eq!( - store.observe_validator(key, i), + store.observe_validator(key, i, $value), Ok(true), "should acknowledge an existing item" ); @@ -997,6 +1170,10 @@ mod tests { } }; } - test_suite_slot!(observed_sync_contributors, ObservedSyncContributors); - test_suite_slot!(observed_sync_aggregators, ObservedSyncAggregators); + test_suite_slot!( + observed_sync_contributors, + ObservedSyncContributors, + Hash256::zero() + ); + test_suite_slot!(observed_sync_aggregators, ObservedSyncAggregators, ()); } diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index b5995121b9..f76fc53796 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -1,9 +1,10 @@ //! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from //! validators that have already produced a block. +use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned}; +use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned}; #[derive(Debug, PartialEq)] pub enum Error { @@ -14,6 +15,12 @@ pub enum Error { ValidatorIndexTooHigh(u64), } +#[derive(Eq, Hash, PartialEq, Debug, Default)] +struct ProposalKey { + slot: Slot, + proposer: u64, +} + /// Maintains a cache of observed `(block.slot, block.proposer)`. /// /// The cache supports pruning based upon the finalized epoch. 
It does not automatically prune, you @@ -27,7 +34,7 @@ pub enum Error { /// known_distinct_shufflings` which is much smaller. pub struct ObservedBlockProducers { finalized_slot: Slot, - items: HashMap>, + items: HashMap>, _phantom: PhantomData, } @@ -42,6 +49,24 @@ impl Default for ObservedBlockProducers { } } +pub enum SeenBlock { + Duplicate, + Slashable, + UniqueNonSlashable, +} + +impl SeenBlock { + pub fn proposer_previously_observed(self) -> bool { + match self { + Self::Duplicate | Self::Slashable => true, + Self::UniqueNonSlashable => false, + } + } + pub fn is_slashable(&self) -> bool { + matches!(self, Self::Slashable) + } +} + impl ObservedBlockProducers { /// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will /// update `self` so future calls to it indicate that this block is known. @@ -52,16 +77,44 @@ impl ObservedBlockProducers { /// /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. 
- pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result { + pub fn observe_proposal( + &mut self, + block_root: Hash256, + block: BeaconBlockRef<'_, E>, + ) -> Result { self.sanitize_block(block)?; - let did_not_exist = self - .items - .entry(block.slot()) - .or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize())) - .insert(block.proposer_index()); + let key = ProposalKey { + slot: block.slot(), + proposer: block.proposer_index(), + }; - Ok(!did_not_exist) + let entry = self.items.entry(key); + + let slashable_proposal = match entry { + Entry::Occupied(mut occupied_entry) => { + let block_roots = occupied_entry.get_mut(); + let newly_inserted = block_roots.insert(block_root); + + let is_equivocation = block_roots.len() > 1; + + if is_equivocation { + SeenBlock::Slashable + } else if !newly_inserted { + SeenBlock::Duplicate + } else { + SeenBlock::UniqueNonSlashable + } + } + Entry::Vacant(vacant_entry) => { + let block_roots = HashSet::from([block_root]); + vacant_entry.insert(block_roots); + + SeenBlock::UniqueNonSlashable + } + }; + + Ok(slashable_proposal) } /// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not @@ -72,15 +125,33 @@ impl ObservedBlockProducers { /// /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. 
- pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result { + pub fn proposer_has_been_observed( + &self, + block: BeaconBlockRef<'_, E>, + block_root: Hash256, + ) -> Result { self.sanitize_block(block)?; - let exists = self - .items - .get(&block.slot()) - .map_or(false, |set| set.contains(&block.proposer_index())); + let key = ProposalKey { + slot: block.slot(), + proposer: block.proposer_index(), + }; - Ok(exists) + if let Some(block_roots) = self.items.get(&key) { + let block_already_known = block_roots.contains(&block_root); + let no_prev_known_blocks = + block_roots.difference(&HashSet::from([block_root])).count() == 0; + + if !no_prev_known_blocks { + Ok(SeenBlock::Slashable) + } else if block_already_known { + Ok(SeenBlock::Duplicate) + } else { + Ok(SeenBlock::UniqueNonSlashable) + } + } else { + Ok(SeenBlock::UniqueNonSlashable) + } } /// Returns `Ok(())` if the given `block` is sane. @@ -112,15 +183,15 @@ impl ObservedBlockProducers { } self.finalized_slot = finalized_slot; - self.items.retain(|slot, _set| *slot > finalized_slot); + self.items.retain(|key, _| key.slot > finalized_slot); } /// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`. /// /// This is useful for doppelganger detection. 
pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool { - self.items.iter().any(|(slot, producers)| { - slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index) + self.items.iter().any(|(key, _)| { + key.slot.epoch(E::slots_per_epoch()) == epoch && key.proposer == validator_index }) } } @@ -148,9 +219,12 @@ mod tests { // Slot 0, proposer 0 let block_a = get_block(0, 0); + let block_root = block_a.canonical_root(); assert_eq!( - cache.observe_proposer(block_a.to_ref()), + cache + .observe_proposal(block_root, block_a.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe proposer, indicates proposer unobserved" ); @@ -164,7 +238,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -182,7 +259,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -207,9 +287,12 @@ mod tests { // First slot of finalized epoch, proposer 0 let block_b = get_block(E::slots_per_epoch(), 0); + let block_root_b = block_b.canonical_root(); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Err(Error::FinalizedBlock { slot: E::slots_per_epoch().into(), finalized_slot: E::slots_per_epoch().into(), @@ -229,7 +312,9 @@ mod tests { let block_b = get_block(three_epochs, 0); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can insert non-finalized block" ); @@ -238,7 +323,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(three_epochs)) + .get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) .expect("the 
three epochs slot should be present") .len(), 1, @@ -262,7 +350,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(three_epochs)) + .get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) .expect("the three epochs slot should be present") .len(), 1, @@ -276,24 +367,33 @@ mod tests { // Slot 0, proposer 0 let block_a = get_block(0, 0); + let block_root_a = block_a.canonical_root(); assert_eq!( - cache.proposer_has_been_observed(block_a.to_ref()), + cache + .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(false), "no observation in empty cache" ); assert_eq!( - cache.observe_proposer(block_a.to_ref()), + cache + .observe_proposal(block_root_a, block_a.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe proposer, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_a.to_ref()), + cache + .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(true), "observed block is indicated as true" ); assert_eq!( - cache.observe_proposer(block_a.to_ref()), + cache + .observe_proposal(block_root_a, block_a.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(true), "observing again indicates true" ); @@ -303,7 +403,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -312,24 +415,33 @@ mod tests { // Slot 1, proposer 0 let block_b = get_block(1, 0); + let block_root_b = block_b.canonical_root(); assert_eq!( - cache.proposer_has_been_observed(block_b.to_ref()), + cache + .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(false), "no observation for new slot" ); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + 
.observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe proposer for new slot, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_b.to_ref()), + cache + .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(true), "observed block in slot 1 is indicated as true" ); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(true), "observing slot 1 again indicates true" ); @@ -339,7 +451,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -348,7 +463,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(1)) + .get(&ProposalKey { + slot: Slot::new(1), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -357,45 +475,54 @@ mod tests { // Slot 0, proposer 1 let block_c = get_block(0, 1); + let block_root_c = block_c.canonical_root(); assert_eq!( - cache.proposer_has_been_observed(block_c.to_ref()), + cache + .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(false), "no observation for new proposer" ); assert_eq!( - cache.observe_proposer(block_c.to_ref()), + cache + .observe_proposal(block_root_c, block_c.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe new proposer, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_c.to_ref()), + cache + .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(true), "observed new proposer block is indicated as true" ); assert_eq!( - cache.observe_proposer(block_c.to_ref()), + cache + 
.observe_proposal(block_root_c, block_c.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(true), "observing new proposer again indicates true" ); assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); - assert_eq!(cache.items.len(), 2, "two slots should be present"); + assert_eq!(cache.items.len(), 3, "three slots should be present"); assert_eq!( cache .items - .get(&Slot::new(0)) - .expect("slot zero should be present") - .len(), + .iter() + .filter(|(k, _)| k.slot == cache.finalized_slot) + .count(), 2, "two proposers should be present in slot 0" ); assert_eq!( cache .items - .get(&Slot::new(1)) - .expect("slot zero should be present") - .len(), + .iter() + .filter(|(k, _)| k.slot == Slot::new(1)) + .count(), 1, "only one proposer should be present in slot 1" ); diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 91a1e24d82..086e1c0949 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -1,10 +1,18 @@ -use crate::{metrics, BeaconChainError}; -use lru::LruCache; -use oneshot_broadcast::{oneshot, Receiver, Sender}; +use std::collections::HashMap; use std::sync::Arc; -use types::{beacon_state::CommitteeCache, AttestationShufflingId, Epoch, Hash256}; -/// The size of the LRU cache that stores committee caches for quicker verification. +use itertools::Itertools; +use slog::{debug, Logger}; + +use oneshot_broadcast::{oneshot, Receiver, Sender}; +use types::{ + beacon_state::CommitteeCache, AttestationShufflingId, BeaconState, Epoch, EthSpec, Hash256, + RelativeEpoch, +}; + +use crate::{metrics, BeaconChainError}; + +/// The size of the cache that stores committee caches for quicker verification. /// /// Each entry should be `8 + 800,000 = 800,008` bytes in size with 100k validators. (8-byte hash + /// 100k indices). Therefore, this cache should be approx `16 * 800,008 = 12.8 MB`. 
(Note: this @@ -45,18 +53,24 @@ impl CacheItem { } } -/// Provides an LRU cache for `CommitteeCache`. +/// Provides a cache for `CommitteeCache`. /// /// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like /// a find/replace error. pub struct ShufflingCache { - cache: LruCache, + cache: HashMap, + cache_size: usize, + head_shuffling_ids: BlockShufflingIds, + logger: Logger, } impl ShufflingCache { - pub fn new(cache_size: usize) -> Self { + pub fn new(cache_size: usize, head_shuffling_ids: BlockShufflingIds, logger: Logger) -> Self { Self { - cache: LruCache::new(cache_size), + cache: HashMap::new(), + cache_size, + head_shuffling_ids, + logger, } } @@ -76,7 +90,7 @@ impl ShufflingCache { metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_HITS); metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); let ready = CacheItem::Committee(committee); - self.cache.put(key.clone(), ready.clone()); + self.insert_cache_item(key.clone(), ready.clone()); Some(ready) } // The promise has not yet been resolved. Return the promise so the caller can await @@ -93,13 +107,12 @@ impl ShufflingCache { // It's worth noting that this is the only place where we removed unresolved // promises from the cache. This means unresolved promises will only be removed if // we try to access them again. This is OK, since the promises don't consume much - // memory and the nature of the LRU cache means that future, relevant entries will - // still be added to the cache. We expect that *all* promises should be resolved, - // unless there is a programming or database error. + // memory. We expect that *all* promises should be resolved, unless there is a + // programming or database error. 
Err(oneshot_broadcast::Error::SenderDropped) => { metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_FAILS); metrics::inc_counter(&metrics::SHUFFLING_CACHE_MISSES); - self.cache.pop(key); + self.cache.remove(key); None } }, @@ -112,13 +125,13 @@ impl ShufflingCache { } pub fn contains(&self, key: &AttestationShufflingId) -> bool { - self.cache.contains(key) + self.cache.contains_key(key) } - pub fn insert_committee_cache( + pub fn insert_committee_cache( &mut self, key: AttestationShufflingId, - committee_cache: &T, + committee_cache: &C, ) { if self .cache @@ -127,13 +140,55 @@ impl ShufflingCache { // worth two in the promise-bush! .map_or(true, CacheItem::is_promise) { - self.cache.put( + self.insert_cache_item( key, CacheItem::Committee(committee_cache.to_arc_committee_cache()), ); } } + /// Prunes the cache first before inserting a new cache item. + fn insert_cache_item(&mut self, key: AttestationShufflingId, cache_item: CacheItem) { + self.prune_cache(); + self.cache.insert(key, cache_item); + } + + /// Prunes the `cache` to keep the size below the `cache_size` limit, based on the following + /// preferences: + /// - Entries from more recent epochs are preferred over older ones. + /// - Entries with shuffling ids matching the head's previous, current, and future epochs must + /// not be pruned. 
+ fn prune_cache(&mut self) { + let target_cache_size = self.cache_size.saturating_sub(1); + if let Some(prune_count) = self.cache.len().checked_sub(target_cache_size) { + let shuffling_ids_to_prune = self + .cache + .keys() + .sorted_by_key(|key| key.shuffling_epoch) + .filter(|shuffling_id| { + Some(shuffling_id) + != self + .head_shuffling_ids + .id_for_epoch(shuffling_id.shuffling_epoch) + .as_ref() + .as_ref() + }) + .take(prune_count) + .cloned() + .collect::>(); + + for shuffling_id in shuffling_ids_to_prune.iter() { + debug!( + self.logger, + "Removing old shuffling from cache"; + "shuffling_epoch" => shuffling_id.shuffling_epoch, + "shuffling_decision_block" => ?shuffling_id.shuffling_decision_block + ); + self.cache.remove(shuffling_id); + } + } + } + pub fn create_promise( &mut self, key: AttestationShufflingId, @@ -148,9 +203,17 @@ impl ShufflingCache { } let (sender, receiver) = oneshot(); - self.cache.put(key, CacheItem::Promise(receiver)); + self.insert_cache_item(key, CacheItem::Promise(receiver)); Ok(sender) } + + /// Inform the cache that the shuffling decision roots for the head has changed. + /// + /// The shufflings for the head's previous, current, and future epochs will never be ejected from + /// the cache during `Self::insert_cache_item`. + pub fn update_head_shuffling_ids(&mut self, head_shuffling_ids: BlockShufflingIds) { + self.head_shuffling_ids = head_shuffling_ids; + } } /// A helper trait to allow lazy-cloning of the committee cache when inserting into the cache. @@ -170,26 +233,29 @@ impl ToArcCommitteeCache for Arc { } } -impl Default for ShufflingCache { - fn default() -> Self { - Self::new(DEFAULT_CACHE_SIZE) - } -} - /// Contains the shuffling IDs for a beacon block. +#[derive(Clone)] pub struct BlockShufflingIds { pub current: AttestationShufflingId, pub next: AttestationShufflingId, + pub previous: Option, pub block_root: Hash256, } impl BlockShufflingIds { /// Returns the shuffling ID for the given epoch. 
/// - /// Returns `None` if `epoch` is prior to `self.current.shuffling_epoch`. + /// Returns `None` if `epoch` is prior to `self.previous?.shuffling_epoch` or + /// `self.current.shuffling_epoch` (if `previous` is `None`). pub fn id_for_epoch(&self, epoch: Epoch) -> Option { if epoch == self.current.shuffling_epoch { Some(self.current.clone()) + } else if self + .previous + .as_ref() + .map_or(false, |id| id.shuffling_epoch == epoch) + { + self.previous.clone() } else if epoch == self.next.shuffling_epoch { Some(self.next.clone()) } else if epoch > self.next.shuffling_epoch { @@ -201,18 +267,57 @@ impl BlockShufflingIds { None } } + + pub fn try_from_head( + head_block_root: Hash256, + head_state: &BeaconState, + ) -> Result { + let get_shuffling_id = |relative_epoch| { + AttestationShufflingId::new(head_block_root, head_state, relative_epoch).map_err(|e| { + format!( + "Unable to get attester shuffling decision slot for the epoch {:?}: {:?}", + relative_epoch, e + ) + }) + }; + + Ok(Self { + current: get_shuffling_id(RelativeEpoch::Current)?, + next: get_shuffling_id(RelativeEpoch::Next)?, + previous: Some(get_shuffling_id(RelativeEpoch::Previous)?), + block_root: head_block_root, + }) + } } // Disable tests in debug since the beacon chain harness is slow unless in release. 
#[cfg(not(debug_assertions))] #[cfg(test)] mod test { - use super::*; - use crate::test_utils::EphemeralHarnessType; + use task_executor::test_utils::null_logger; use types::*; - type BeaconChainHarness = - crate::test_utils::BeaconChainHarness>; + use crate::test_utils::EphemeralHarnessType; + + use super::*; + + type E = MinimalEthSpec; + type TestBeaconChainType = EphemeralHarnessType; + type BeaconChainHarness = crate::test_utils::BeaconChainHarness; + const TEST_CACHE_SIZE: usize = 5; + + // Creates a new shuffling cache for testing + fn new_shuffling_cache() -> ShufflingCache { + let current_epoch = 8; + let head_shuffling_ids = BlockShufflingIds { + current: shuffling_id(current_epoch), + next: shuffling_id(current_epoch + 1), + previous: Some(shuffling_id(current_epoch - 1)), + block_root: Hash256::from_low_u64_le(0), + }; + let logger = null_logger().unwrap(); + ShufflingCache::new(TEST_CACHE_SIZE, head_shuffling_ids, logger) + } /// Returns two different committee caches for testing. fn committee_caches() -> (Arc, Arc) { @@ -249,7 +354,7 @@ mod test { fn resolved_promise() { let (committee_a, _) = committee_caches(); let id_a = shuffling_id(1); - let mut cache = ShufflingCache::default(); + let mut cache = new_shuffling_cache(); // Create a promise. let sender = cache.create_promise(id_a.clone()).unwrap(); @@ -276,7 +381,7 @@ mod test { #[test] fn unresolved_promise() { let id_a = shuffling_id(1); - let mut cache = ShufflingCache::default(); + let mut cache = new_shuffling_cache(); // Create a promise. let sender = cache.create_promise(id_a.clone()).unwrap(); @@ -301,7 +406,7 @@ mod test { fn two_promises() { let (committee_a, committee_b) = committee_caches(); let (id_a, id_b) = (shuffling_id(1), shuffling_id(2)); - let mut cache = ShufflingCache::default(); + let mut cache = new_shuffling_cache(); // Create promise A. 
let sender_a = cache.create_promise(id_a.clone()).unwrap(); @@ -355,7 +460,7 @@ mod test { #[test] fn too_many_promises() { - let mut cache = ShufflingCache::default(); + let mut cache = new_shuffling_cache(); for i in 0..MAX_CONCURRENT_PROMISES { cache.create_promise(shuffling_id(i as u64)).unwrap(); @@ -375,4 +480,105 @@ mod test { "the cache should have two entries" ); } + + #[test] + fn should_insert_committee_cache() { + let mut cache = new_shuffling_cache(); + let id_a = shuffling_id(1); + let committee_cache_a = Arc::new(CommitteeCache::default()); + cache.insert_committee_cache(id_a.clone(), &committee_cache_a); + assert!( + matches!(cache.get(&id_a).unwrap(), CacheItem::Committee(committee_cache) if committee_cache == committee_cache_a), + "should insert committee cache" + ); + } + + #[test] + fn should_prune_committee_cache_with_lowest_epoch() { + let mut cache = new_shuffling_cache(); + let shuffling_id_and_committee_caches = (0..(TEST_CACHE_SIZE + 1)) + .map(|i| (shuffling_id(i as u64), Arc::new(CommitteeCache::default()))) + .collect::>(); + + for (shuffling_id, committee_cache) in shuffling_id_and_committee_caches.iter() { + cache.insert_committee_cache(shuffling_id.clone(), committee_cache); + } + + for i in 1..(TEST_CACHE_SIZE + 1) { + assert!( + cache.contains(&shuffling_id_and_committee_caches.get(i).unwrap().0), + "should contain recent epoch shuffling ids" + ); + } + + assert!( + !cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0), + "should not contain oldest epoch shuffling id" + ); + assert_eq!( + cache.cache.len(), + cache.cache_size, + "should limit cache size" + ); + } + + #[test] + fn should_retain_head_state_shufflings() { + let mut cache = new_shuffling_cache(); + let current_epoch = 10; + let committee_cache = Arc::new(CommitteeCache::default()); + + // Insert a few entries for next the epoch with different decision roots. 
+ for i in 0..TEST_CACHE_SIZE { + let shuffling_id = AttestationShufflingId { + shuffling_epoch: (current_epoch + 1).into(), + shuffling_decision_block: Hash256::from_low_u64_be(current_epoch + i as u64), + }; + cache.insert_committee_cache(shuffling_id, &committee_cache); + } + + // Now, update the head shuffling ids + let head_shuffling_ids = BlockShufflingIds { + current: shuffling_id(current_epoch), + next: shuffling_id(current_epoch + 1), + previous: Some(shuffling_id(current_epoch - 1)), + block_root: Hash256::from_low_u64_le(42), + }; + cache.update_head_shuffling_ids(head_shuffling_ids.clone()); + + // Insert head state shuffling ids. Should not be overridden by other shuffling ids. + cache.insert_committee_cache(head_shuffling_ids.current.clone(), &committee_cache); + cache.insert_committee_cache(head_shuffling_ids.next.clone(), &committee_cache); + cache.insert_committee_cache( + head_shuffling_ids.previous.clone().unwrap(), + &committee_cache, + ); + + // Insert a few entries for older epochs. + for i in 0..TEST_CACHE_SIZE { + let shuffling_id = AttestationShufflingId { + shuffling_epoch: Epoch::from(i), + shuffling_decision_block: Hash256::from_low_u64_be(i as u64), + }; + cache.insert_committee_cache(shuffling_id, &committee_cache); + } + + assert!( + cache.contains(&head_shuffling_ids.current), + "should retain head shuffling id for the current epoch." + ); + assert!( + cache.contains(&head_shuffling_ids.next), + "should retain head shuffling id for the next epoch." + ); + assert!( + cache.contains(&head_shuffling_ids.previous.unwrap()), + "should retain head shuffling id for previous epoch." 
+ ); + assert_eq!( + cache.cache.len(), + cache.cache_size, + "should limit cache size" + ); + } } diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 4b4228e71d..246bb12cc0 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -37,6 +37,7 @@ use bls::{verify_signature_sets, PublicKeyBytes}; use derivative::Derivative; use safe_arith::ArithError; use slot_clock::SlotClock; +use ssz_derive::{Decode, Encode}; use state_processing::per_block_processing::errors::SyncCommitteeMessageValidationError; use state_processing::signature_sets::{ signed_sync_aggregate_selection_proof_signature_set, signed_sync_aggregate_signature_set, @@ -47,6 +48,7 @@ use std::borrow::Cow; use std::collections::HashMap; use strum::AsRefStr; use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; use types::sync_committee::Error as SyncCommitteeError; @@ -110,14 +112,14 @@ pub enum Error { /// /// The peer has sent an invalid message. AggregatorPubkeyUnknown(u64), - /// The sync contribution has been seen before; either in a block, on the gossip network or from a - /// local validator. + /// The sync contribution or a superset of this sync contribution's aggregation bits for the same data + /// has been seen before; either in a block on the gossip network or from a local validator. /// /// ## Peer scoring /// /// It's unclear if this sync contribution is valid, however we have already observed it and do not /// need to observe it again. - SyncContributionAlreadyKnown(Hash256), + SyncContributionSupersetKnown(Hash256), /// There has already been an aggregation observed for this validator, we refuse to process a /// second. 
/// @@ -153,7 +155,21 @@ pub enum Error { /// It's unclear if this sync message is valid, however we have already observed a /// signature from this validator for this slot and should not observe /// another. - PriorSyncCommitteeMessageKnown { validator_index: u64, slot: Slot }, + PriorSyncCommitteeMessageKnown { + validator_index: u64, + slot: Slot, + prev_root: Hash256, + new_root: Hash256, + }, + /// We have already observed a contribution for the aggregator and refuse to + /// process another. + /// + /// ## Peer scoring + /// + /// It's unclear if this sync message is valid, however we have already observed a + /// signature from this validator for this slot and should not observe + /// another. + PriorSyncContributionMessageKnown { validator_index: u64, slot: Slot }, /// The sync committee message was received on an invalid sync committee message subnet. /// /// ## Peer scoring @@ -254,6 +270,14 @@ pub struct VerifiedSyncContribution { participant_pubkeys: Vec, } +/// The sync contribution data. +#[derive(Encode, Decode, TreeHash)] +pub struct SyncCommitteeData { + pub slot: Slot, + pub root: Hash256, + pub subcommittee_index: u64, +} + /// Wraps a `SyncCommitteeMessage` that has been verified for propagation on the gossip network. #[derive(Clone)] pub struct VerifiedSyncCommitteeMessage { @@ -300,15 +324,22 @@ impl VerifiedSyncContribution { return Err(Error::AggregatorNotInCommittee { aggregator_index }); }; - // Ensure the valid sync contribution has not already been seen locally. - let contribution_root = contribution.tree_hash_root(); + // Ensure the valid sync contribution or its superset has not already been seen locally. 
+ let contribution_data_root = SyncCommitteeData { + slot: contribution.slot, + root: contribution.beacon_block_root, + subcommittee_index: contribution.subcommittee_index, + } + .tree_hash_root(); + if chain .observed_sync_contributions .write() - .is_known(contribution, contribution_root) + .is_known_subset(contribution, contribution_data_root) .map_err(|e| Error::BeaconChainError(e.into()))? { - return Err(Error::SyncContributionAlreadyKnown(contribution_root)); + metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); + return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); } // Ensure there has been no other observed aggregate for the given `aggregator_index`. @@ -362,13 +393,14 @@ impl VerifiedSyncContribution { // // It's important to double check that the contribution is not already known, otherwise two // contribution processed at the same time could be published. - if let ObserveOutcome::AlreadyKnown = chain + if let ObserveOutcome::Subset = chain .observed_sync_contributions .write() - .observe_item(contribution, Some(contribution_root)) + .observe_item(contribution, Some(contribution_data_root)) .map_err(|e| Error::BeaconChainError(e.into()))? { - return Err(Error::SyncContributionAlreadyKnown(contribution_root)); + metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); + return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); } // Observe the aggregator so we don't process another aggregate from them. @@ -378,10 +410,10 @@ impl VerifiedSyncContribution { if chain .observed_sync_aggregators .write() - .observe_validator(observed_key, aggregator_index as usize) + .observe_validator(observed_key, aggregator_index as usize, ()) .map_err(BeaconChainError::from)? 
{ - return Err(Error::PriorSyncCommitteeMessageKnown { + return Err(Error::PriorSyncContributionMessageKnown { validator_index: aggregator_index, slot: contribution.slot, }); @@ -450,19 +482,40 @@ impl VerifiedSyncCommitteeMessage { // The sync committee message is the first valid message received for the participating validator // for the slot, sync_message.slot. let validator_index = sync_message.validator_index; - if chain + let head_root = chain.canonical_head.cached_head().head_block_root(); + let new_root = sync_message.beacon_block_root; + let should_override_prev = |prev_root: &Hash256, new_root: &Hash256| { + let roots_differ = new_root != prev_root; + let new_elects_head = new_root == &head_root; + + if roots_differ { + // Track sync committee messages that differ from each other. + metrics::inc_counter(&metrics::SYNC_MESSAGE_EQUIVOCATIONS); + if new_elects_head { + // Track sync committee messages that swap from an old block to a new block. + metrics::inc_counter(&metrics::SYNC_MESSAGE_EQUIVOCATIONS_TO_HEAD); + } + } + + roots_differ && new_elects_head + }; + if let Some(prev_root) = chain .observed_sync_contributors .read() - .validator_has_been_observed( + .observation_for_validator( SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()), validator_index as usize, ) .map_err(BeaconChainError::from)? { - return Err(Error::PriorSyncCommitteeMessageKnown { - validator_index, - slot: sync_message.slot, - }); + if !should_override_prev(&prev_root, &new_root) { + return Err(Error::PriorSyncCommitteeMessageKnown { + validator_index, + slot: sync_message.slot, + prev_root, + new_root, + }); + } } // The aggregate signature of the sync committee message is valid. @@ -474,18 +527,22 @@ impl VerifiedSyncCommitteeMessage { // It's important to double check that the sync committee message still hasn't been observed, since // there can be a race-condition if we receive two sync committee messages at the same time and // process them in different threads. 
- if chain + if let Some(prev_root) = chain .observed_sync_contributors .write() - .observe_validator( + .observe_validator_with_override( SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()), validator_index as usize, + sync_message.beacon_block_root, + should_override_prev, ) .map_err(BeaconChainError::from)? { return Err(Error::PriorSyncCommitteeMessageKnown { validator_index, slot: sync_message.slot, + prev_root, + new_root, }); } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c5615b6185..6520c9ba9c 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -22,7 +22,6 @@ use execution_layer::{ }, ExecutionLayer, }; -use fork_choice::CountUnrealized; use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; @@ -734,6 +733,15 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } + pub async fn make_blinded_block( + &self, + state: BeaconState, + slot: Slot, + ) -> (SignedBlindedBeaconBlock, BeaconState) { + let (unblinded, new_state) = self.make_block(state, slot).await; + (unblinded.into(), new_state) + } + /// Returns a newly created block, signed by the proposer for the given slot. 
pub async fn make_block( &self, @@ -746,9 +754,7 @@ where complete_state_advance(&mut state, None, slot, &self.spec) .expect("should be able to advance state to slot"); - state - .build_all_caches(&self.spec) - .expect("should build caches"); + state.build_caches(&self.spec).expect("should build caches"); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); @@ -795,9 +801,7 @@ where complete_state_advance(&mut state, None, slot, &self.spec) .expect("should be able to advance state to slot"); - state - .build_all_caches(&self.spec) - .expect("should build caches"); + state.build_caches(&self.spec).expect("should build caches"); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); @@ -1515,6 +1519,36 @@ where .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } + pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> { + let propposer_slashing = self.make_proposer_slashing(validator_index); + if let ObservationOutcome::New(verified_proposer_slashing) = self + .chain + .verify_proposer_slashing_for_gossip(propposer_slashing) + .expect("should verify proposer slashing for gossip") + { + self.chain + .import_proposer_slashing(verified_proposer_slashing); + Ok(()) + } else { + Err("should observe new proposer slashing".to_string()) + } + } + + pub fn add_attester_slashing(&self, validator_indices: Vec) -> Result<(), String> { + let attester_slashing = self.make_attester_slashing(validator_indices); + if let ObservationOutcome::New(verified_attester_slashing) = self + .chain + .verify_attester_slashing_for_gossip(attester_slashing) + .expect("should verify attester slashing for gossip") + { + self.chain + .import_attester_slashing(verified_attester_slashing); + Ok(()) + } else { + Err("should observe new attester slashing".to_string()) + } + } + pub fn add_bls_to_execution_change( &self, validator_index: u64, @@ -1696,8 +1730,8 @@ where .process_block( block_root, Arc::new(block), - 
CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await? .into(); @@ -1714,8 +1748,8 @@ where .process_block( block.canonical_root(), Arc::new(block), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await? .into(); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 1040521e5a..5cea51090b 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -699,8 +699,8 @@ async fn aggregated_gossip_verification() { |tester, err| { assert!(matches!( err, - AttnError::AttestationAlreadyKnown(hash) - if hash == tester.valid_aggregate.message.aggregate.tree_hash_root() + AttnError::AttestationSupersetKnown(hash) + if hash == tester.valid_aggregate.message.aggregate.data.tree_hash_root() )) }, ) diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index c66ed60a9c..75b00b2b44 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -3,8 +3,9 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer}; -use fork_choice::CountUnrealized; +use beacon_chain::{ + BeaconSnapshot, BlockError, ChainSegmentResult, IntoExecutionPendingBlock, NotifyExecutionLayer, +}; use lazy_static::lazy_static; use logging::test_logger; use slasher::{Config as SlasherConfig, Slasher}; @@ -148,18 +149,14 @@ async fn chain_segment_full_segment() { // Sneak in a little check to ensure we can process empty chain segments. 
harness .chain - .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes) + .process_chain_segment(vec![], NotifyExecutionLayer::Yes) .await .into_block_error() .expect("should import empty chain segment"); harness .chain - .process_chain_segment( - blocks.clone(), - CountUnrealized::True, - NotifyExecutionLayer::Yes, - ) + .process_chain_segment(blocks.clone(), NotifyExecutionLayer::Yes) .await .into_block_error() .expect("should import chain segment"); @@ -188,11 +185,7 @@ async fn chain_segment_varying_chunk_size() { for chunk in blocks.chunks(*chunk_size) { harness .chain - .process_chain_segment( - chunk.to_vec(), - CountUnrealized::True, - NotifyExecutionLayer::Yes, - ) + .process_chain_segment(chunk.to_vec(), NotifyExecutionLayer::Yes) .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); @@ -228,7 +221,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) + .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -248,7 +241,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) + .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -279,7 +272,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) + .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -300,7 +293,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) + .process_chain_segment(blocks, 
NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -326,7 +319,7 @@ async fn assert_invalid_signature( matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) + .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -348,11 +341,7 @@ async fn assert_invalid_signature( // imported prior to this test. let _ = harness .chain - .process_chain_segment( - ancestor_blocks, - CountUnrealized::True, - NotifyExecutionLayer::Yes, - ) + .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes) .await; harness.chain.recompute_head_at_current_slot().await; @@ -361,8 +350,8 @@ async fn assert_invalid_signature( .process_block( snapshots[block_index].beacon_block.canonical_root(), snapshots[block_index].beacon_block.clone(), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await; assert!( @@ -414,11 +403,7 @@ async fn invalid_signature_gossip_block() { .collect(); harness .chain - .process_chain_segment( - ancestor_blocks, - CountUnrealized::True, - NotifyExecutionLayer::Yes, - ) + .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes) .await .into_block_error() .expect("should import all blocks prior to the one being tested"); @@ -430,8 +415,8 @@ async fn invalid_signature_gossip_block() { .process_block( signed_block.canonical_root(), Arc::new(signed_block), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await, Err(BlockError::InvalidSignature) @@ -465,7 +450,7 @@ async fn invalid_signature_block_proposal() { matches!( harness .chain - .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes) + .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -663,7 +648,7 @@ async fn invalid_signature_deposit() { !matches!( harness .chain - .process_chain_segment(blocks, 
CountUnrealized::True, NotifyExecutionLayer::Yes) + .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -743,8 +728,8 @@ async fn block_gossip_verification() { .process_block( gossip_verified.block_root, gossip_verified, - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .expect("should import valid gossip verified block"); @@ -941,11 +926,7 @@ async fn block_gossip_verification() { assert!( matches!( unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), - BlockError::RepeatProposal { - proposer, - slot, - } - if proposer == other_proposer && slot == block.message().slot() + BlockError::BlockIsAlreadyKnown, ), "should register any valid signature against the proposer, even if the block failed later verification" ); @@ -974,11 +955,7 @@ async fn block_gossip_verification() { .await .err() .expect("should error when processing known block"), - BlockError::RepeatProposal { - proposer, - slot, - } - if proposer == block.message().proposer_index() && slot == block.message().slot() + BlockError::BlockIsAlreadyKnown ), "the second proposal by this validator should be rejected" ); @@ -1015,8 +992,8 @@ async fn verify_block_for_gossip_slashing_detection() { .process_block( verified_block.block_root, verified_block, - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -1055,8 +1032,8 @@ async fn verify_block_for_gossip_doppelganger_detection() { .process_block( verified_block.block_root, verified_block, - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -1203,8 +1180,8 @@ async fn add_base_block_to_altair_chain() { .process_block( base_block.canonical_root(), Arc::new(base_block.clone()), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .err() @@ -1219,11 +1196,7 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - 
.process_chain_segment( - vec![Arc::new(base_block)], - CountUnrealized::True, - NotifyExecutionLayer::Yes, - ) + .process_chain_segment(vec![Arc::new(base_block)], NotifyExecutionLayer::Yes,) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1342,8 +1315,8 @@ async fn add_altair_block_to_base_chain() { .process_block( altair_block.canonical_root(), Arc::new(altair_block.clone()), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .err() @@ -1358,11 +1331,7 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_chain_segment( - vec![Arc::new(altair_block)], - CountUnrealized::True, - NotifyExecutionLayer::Yes - ) + .process_chain_segment(vec![Arc::new(altair_block)], NotifyExecutionLayer::Yes) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1373,3 +1342,100 @@ async fn add_altair_block_to_base_chain() { } )); } + +#[tokio::test] +async fn import_duplicate_block_unrealized_justification() { + let spec = MainnetEthSpec::default_spec(); + + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec) + .keypairs(KEYPAIRS[..].to_vec()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + let chain = &harness.chain; + + // Move out of the genesis slot. + harness.advance_slot(); + + // Build the chain out to the first justification opportunity 2/3rds of the way through epoch 2. + let num_slots = E::slots_per_epoch() as usize * 8 / 3; + harness + .extend_chain( + num_slots, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Move into the next empty slot. + harness.advance_slot(); + + // The store's justified checkpoint must still be at epoch 0, while unrealized justification + // must be at epoch 1. + let fc = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc.justified_checkpoint().epoch, 0); + assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1); + drop(fc); + + // Produce a block to justify epoch 2. 
+ let state = harness.get_current_state(); + let slot = harness.get_current_slot(); + let (block, _) = harness.make_block(state.clone(), slot).await; + let block = Arc::new(block); + let block_root = block.canonical_root(); + + // Create two verified variants of the block, representing the same block being processed in + // parallel. + let notify_execution_layer = NotifyExecutionLayer::Yes; + let verified_block1 = block + .clone() + .into_execution_pending_block(block_root, &chain, notify_execution_layer) + .unwrap(); + let verified_block2 = block + .into_execution_pending_block(block_root, &chain, notify_execution_layer) + .unwrap(); + + // Import the first block, simulating a block processed via a finalized chain segment. + chain + .clone() + .import_execution_pending_block(verified_block1) + .await + .unwrap(); + + // Unrealized justification should NOT have updated. + let fc = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc.justified_checkpoint().epoch, 0); + let unrealized_justification = fc.unrealized_justified_checkpoint(); + assert_eq!(unrealized_justification.epoch, 2); + + // The fork choice node for the block should have unrealized justification. + let fc_block = fc.get_block(&block_root).unwrap(); + assert_eq!( + fc_block.unrealized_justified_checkpoint, + Some(unrealized_justification) + ); + drop(fc); + + // Import the second verified block, simulating a block processed via RPC. + chain + .clone() + .import_execution_pending_block(verified_block2) + .await + .unwrap(); + + // Unrealized justification should still be updated. + let fc = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc.justified_checkpoint().epoch, 0); + assert_eq!( + fc.unrealized_justified_checkpoint(), + unrealized_justification + ); + + // The fork choice node for the block should still have the unrealized justified checkpoint. 
+ let fc_block = fc.get_block(&block_root).unwrap(); + assert_eq!( + fc_block.unrealized_justified_checkpoint, + Some(unrealized_justification) + ); +} diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index e910e8134f..f0b799ec9f 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -133,13 +133,8 @@ async fn base_altair_merge_capella() { for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - let full_payload: FullPayload = block - .message() - .body() - .execution_payload() - .unwrap() - .clone() - .into(); + let full_payload: FullPayload = + block.message().body().execution_payload().unwrap().into(); // pre-capella shouldn't have withdrawals assert!(full_payload.withdrawals_root().is_err()); execution_payloads.push(full_payload); @@ -151,13 +146,8 @@ async fn base_altair_merge_capella() { for _ in 0..16 { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - let full_payload: FullPayload = block - .message() - .body() - .execution_payload() - .unwrap() - .clone() - .into(); + let full_payload: FullPayload = + block.message().body().execution_payload().unwrap().into(); // post-capella should have withdrawals assert!(full_payload.withdrawals_root().is_ok()); execution_payloads.push(full_payload); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 54d7734471..9a8c324d09 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -17,9 +17,7 @@ use execution_layer::{ test_utils::ExecutionBlockGenerator, ExecutionLayer, ForkchoiceState, PayloadAttributes, }; -use fork_choice::{ - CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, -}; +use 
fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; use logging::test_logger; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; @@ -698,8 +696,8 @@ async fn invalidates_all_descendants() { .process_block( fork_block.canonical_root(), Arc::new(fork_block), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -795,8 +793,8 @@ async fn switches_heads() { .process_block( fork_block.canonical_root(), Arc::new(fork_block), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -910,6 +908,9 @@ async fn invalid_after_optimistic_sync() { .await, ); + // EL status should still be online, no errors. + assert!(!rig.execution_layer().is_offline_or_erroring().await); + // Running fork choice is necessary since a block has been invalidated. rig.recompute_head().await; @@ -1047,7 +1048,9 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. 
assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, + || Ok(()), + ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1061,8 +1064,9 @@ async fn invalid_parent() { Duration::from_secs(0), &state, PayloadVerificationStatus::Optimistic, + rig.harness.chain.config.progressive_balances_mode, &rig.harness.chain.spec, - CountUnrealized::True, + rig.harness.logger() ), Err(ForkChoiceError::ProtoArrayStringError(message)) if message.contains(&format!( @@ -1336,8 +1340,8 @@ async fn build_optimistic_chain( .process_block( block.canonical_root(), block, - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -1897,8 +1901,8 @@ async fn recover_from_invalid_head_by_importing_blocks() { .process_block( fork_block.canonical_root(), fork_block.clone(), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 2f40443b99..2902774825 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -12,7 +12,6 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, }; -use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; @@ -2151,8 +2150,8 @@ async fn weak_subjectivity_sync() { .process_block( full_block.canonical_root(), Arc::new(full_block), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs 
b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 239f55e7d3..0e4745ff6b 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -1,16 +1,20 @@ #![cfg(not(debug_assertions))] -use beacon_chain::sync_committee_verification::Error as SyncCommitteeError; +use beacon_chain::sync_committee_verification::{Error as SyncCommitteeError, SyncCommitteeData}; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; use int_to_bytes::int_to_bytes32; use lazy_static::lazy_static; use safe_arith::SafeArith; +use state_processing::{ + per_block_processing::{altair::sync_committee::process_sync_aggregate, VerifySignatures}, + state_advance::complete_state_advance, +}; use store::{SignedContributionAndProof, SyncCommitteeMessage}; use tree_hash::TreeHash; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::{ AggregateSignature, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, Slot, - SyncSelectionProof, SyncSubnetId, Unsigned, + SyncContributionData, SyncSelectionProof, SyncSubnetId, Unsigned, }; pub type E = MainnetEthSpec; @@ -47,10 +51,29 @@ fn get_valid_sync_committee_message( relative_sync_committee: RelativeSyncCommittee, message_index: usize, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { - let head_state = harness.chain.head_beacon_state_cloned(); let head_block_root = harness.chain.head_snapshot().beacon_block_root; + get_valid_sync_committee_message_for_block( + harness, + slot, + relative_sync_committee, + message_index, + head_block_root, + ) +} + +/// Returns a sync message that is valid for some slot in the given `chain`. +/// +/// Also returns some info about who created it. 
+fn get_valid_sync_committee_message_for_block( + harness: &BeaconChainHarness>, + slot: Slot, + relative_sync_committee: RelativeSyncCommittee, + message_index: usize, + block_root: Hash256, +) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { + let head_state = harness.chain.head_beacon_state_cloned(); let (signature, _) = harness - .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) + .make_sync_committee_messages(&head_state, block_root, slot, relative_sync_committee) .get(0) .expect("sync messages should exist") .get(message_index) @@ -119,7 +142,7 @@ fn get_non_aggregator( subcommittee.iter().find_map(|pubkey| { let validator_index = harness .chain - .validator_index(&pubkey) + .validator_index(pubkey) .expect("should get validator index") .expect("pubkey should exist in beacon chain"); @@ -376,7 +399,7 @@ async fn aggregated_gossip_verification() { SyncCommitteeError::AggregatorNotInCommittee { aggregator_index } - if aggregator_index == valid_aggregate.message.aggregator_index as u64 + if aggregator_index == valid_aggregate.message.aggregator_index ); /* @@ -421,11 +444,17 @@ async fn aggregated_gossip_verification() { * subcommittee index contribution.subcommittee_index. 
*/ + let contribution = &valid_aggregate.message.contribution; + let sync_committee_data = SyncCommitteeData { + slot: contribution.slot, + root: contribution.beacon_block_root, + subcommittee_index: contribution.subcommittee_index, + }; assert_invalid!( "aggregate that has already been seen", valid_aggregate.clone(), - SyncCommitteeError::SyncContributionAlreadyKnown(hash) - if hash == valid_aggregate.message.contribution.tree_hash_root() + SyncCommitteeError::SyncContributionSupersetKnown(hash) + if hash == sync_committee_data.tree_hash_root() ); /* @@ -472,7 +501,7 @@ async fn aggregated_gossip_verification() { assert_invalid!( "sync contribution created with incorrect sync committee", - next_valid_contribution.clone(), + next_valid_contribution, SyncCommitteeError::InvalidSignature | SyncCommitteeError::AggregatorNotInCommittee { .. } ); } @@ -496,6 +525,30 @@ async fn unaggregated_gossip_verification() { let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) = get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current, 0); + let parent_root = harness.chain.head_snapshot().beacon_block.parent_root(); + let (valid_sync_committee_message_to_parent, _, _, _) = + get_valid_sync_committee_message_for_block( + &harness, + current_slot, + RelativeSyncCommittee::Current, + 0, + parent_root, + ); + + assert_eq!( + valid_sync_committee_message.slot, valid_sync_committee_message_to_parent.slot, + "test pre-condition: same slot" + ); + assert_eq!( + valid_sync_committee_message.validator_index, + valid_sync_committee_message_to_parent.validator_index, + "test pre-condition: same validator index" + ); + assert!( + valid_sync_committee_message.beacon_block_root + != valid_sync_committee_message_to_parent.beacon_block_root, + "test pre-condition: differing roots" + ); macro_rules! assert_invalid { ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) 
=> { @@ -602,28 +655,130 @@ async fn unaggregated_gossip_verification() { SyncCommitteeError::InvalidSignature ); + let head_root = valid_sync_committee_message.beacon_block_root; + let parent_root = valid_sync_committee_message_to_parent.beacon_block_root; + + let verifed_message_to_parent = harness + .chain + .verify_sync_committee_message_for_gossip( + valid_sync_committee_message_to_parent.clone(), + subnet_id, + ) + .expect("valid sync message to parent should be verified"); + // Add the aggregate to the pool. harness .chain - .verify_sync_committee_message_for_gossip(valid_sync_committee_message.clone(), subnet_id) - .expect("valid sync message should be verified"); + .add_to_naive_sync_aggregation_pool(verifed_message_to_parent) + .unwrap(); /* * The following test ensures that: * - * There has been no other valid sync committee message for the declared slot for the - * validator referenced by sync_committee_message.validator_index. + * A sync committee message from the same validator to the same block will + * be rejected. */ assert_invalid!( - "sync message that has already been seen", - valid_sync_committee_message, + "sync message to parent block that has already been seen", + valid_sync_committee_message_to_parent.clone(), subnet_id, SyncCommitteeError::PriorSyncCommitteeMessageKnown { validator_index, slot, + prev_root, + new_root } - if validator_index == expected_validator_index as u64 && slot == current_slot + if validator_index == expected_validator_index as u64 && slot == current_slot && prev_root == parent_root && new_root == parent_root ); + let verified_message_to_head = harness + .chain + .verify_sync_committee_message_for_gossip(valid_sync_committee_message.clone(), subnet_id) + .expect("valid sync message to the head should be verified"); + // Add the aggregate to the pool. 
+ harness + .chain + .add_to_naive_sync_aggregation_pool(verified_message_to_head) + .unwrap(); + + /* + * The following test ensures that: + * + * A sync committee message from the same validator to the same block will + * be rejected. + */ + assert_invalid!( + "sync message to the head that has already been seen", + valid_sync_committee_message.clone(), + subnet_id, + SyncCommitteeError::PriorSyncCommitteeMessageKnown { + validator_index, + slot, + prev_root, + new_root + } + if validator_index == expected_validator_index as u64 && slot == current_slot && prev_root == head_root && new_root == head_root + ); + + /* + * The following test ensures that: + * + * A sync committee message from the same validator to a non-head block will + * be rejected. + */ + assert_invalid!( + "sync message to parent after message to head has already been seen", + valid_sync_committee_message_to_parent.clone(), + subnet_id, + SyncCommitteeError::PriorSyncCommitteeMessageKnown { + validator_index, + slot, + prev_root, + new_root + } + if validator_index == expected_validator_index as u64 && slot == current_slot && prev_root == head_root && new_root == parent_root + ); + + // Ensure that the sync aggregates in the op pool for both the parent block and head block are valid. + let chain = &harness.chain; + let check_sync_aggregate = |root: Hash256| async move { + // Generate an aggregate sync message from the naive aggregation pool. + let aggregate = chain + .get_aggregated_sync_committee_contribution(&SyncContributionData { + // It's a test pre-condition that both sync messages have the same slot. + slot: valid_sync_committee_message.slot, + beacon_block_root: root, + subcommittee_index: subnet_id.into(), + }) + .unwrap() + .unwrap(); + + // Insert the aggregate into the op pool. + chain.op_pool.insert_sync_contribution(aggregate).unwrap(); + + // Load the block and state for the given root. 
+ let block = chain.get_block(&root).await.unwrap().unwrap(); + let mut state = chain.get_state(&block.state_root(), None).unwrap().unwrap(); + + // Advance the state to simulate a pre-state for block production. + let slot = valid_sync_committee_message.slot + 1; + complete_state_advance(&mut state, Some(block.state_root()), slot, &chain.spec).unwrap(); + + // Get an aggregate that would be included in a block. + let aggregate_for_inclusion = chain.op_pool.get_sync_aggregate(&state).unwrap().unwrap(); + + // Validate the retrieved aggregate against the state. + process_sync_aggregate( + &mut state, + &aggregate_for_inclusion, + 0, + VerifySignatures::True, + &chain.spec, + ) + .unwrap(); + }; + check_sync_aggregate(valid_sync_committee_message.beacon_block_root).await; + check_sync_aggregate(valid_sync_committee_message_to_parent.beacon_block_root).await; + /* * The following test ensures that: * @@ -649,7 +804,7 @@ async fn unaggregated_gossip_verification() { assert_invalid!( "sync message on incorrect subnet", - next_valid_sync_committee_message.clone(), + next_valid_sync_committee_message, next_subnet_id, SyncCommitteeError::InvalidSubnetId { received, diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index b4eabc8093..c5b2892cbd 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -8,7 +8,6 @@ use beacon_chain::{ }, BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; -use fork_choice::CountUnrealized; use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{ @@ -687,8 +686,8 @@ async fn run_skip_slot_test(skip_slots: u64) { .process_block( harness_a.chain.head_snapshot().beacon_block_root, harness_a.chain.head_snapshot().beacon_block.clone(), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()) ) .await .unwrap(), diff --git a/beacon_node/builder_client/src/lib.rs 
b/beacon_node/builder_client/src/lib.rs index 255c2fdd19..c78f686d02 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -72,7 +72,7 @@ impl BuilderHttpClient { .await? .json() .await - .map_err(Error::Reqwest) + .map_err(Into::into) } /// Perform a HTTP GET request, returning the `Response` for further processing. @@ -85,7 +85,7 @@ impl BuilderHttpClient { if let Some(timeout) = timeout { builder = builder.timeout(timeout); } - let response = builder.send().await.map_err(Error::Reqwest)?; + let response = builder.send().await.map_err(Error::from)?; ok_or_error(response).await } @@ -114,7 +114,7 @@ impl BuilderHttpClient { if let Some(timeout) = timeout { builder = builder.timeout(timeout); } - let response = builder.json(body).send().await.map_err(Error::Reqwest)?; + let response = builder.json(body).send().await.map_err(Error::from)?; ok_or_error(response).await } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 876458eea5..64c79ea668 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,7 +6,6 @@ edition = "2021" [dev-dependencies] serde_yaml = "0.8.13" -logging = { path = "../../common/logging" } state_processing = { path = "../../consensus/state_processing" } operation_pool = { path = "../operation_pool" } tokio = "1.14.0" @@ -17,6 +16,7 @@ store = { path = "../store" } network = { path = "../network" } timer = { path = "../timer" } lighthouse_network = { path = "../lighthouse_network" } +logging = { path = "../../common/logging" } parking_lot = "0.12.0" types = { path = "../../consensus/types" } eth2_config = { path = "../../common/eth2_config" } @@ -39,7 +39,7 @@ time = "0.3.5" directory = {path = "../../common/directory"} http_api = { path = "../http_api" } http_metrics = { path = "../http_metrics" } -slasher = { path = "../../slasher", default-features = false } +slasher = { path = "../../slasher" } slasher_service = { path = "../../slasher/service" 
} monitoring_api = {path = "../../common/monitoring_api"} execution_layer = { path = "../execution_layer" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 5ef1f28fb4..e05b92a277 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -478,6 +478,7 @@ where network_globals: None, eth1_service: Some(genesis_service.eth1_service.clone()), log: context.log().clone(), + sse_logging_components: runtime_context.sse_logging_components.clone(), }); // Discard the error from the oneshot. @@ -698,6 +699,7 @@ where network_senders: self.network_senders.clone(), network_globals: self.network_globals.clone(), eth1_service: self.eth1_service.clone(), + sse_logging_components: runtime_context.sse_logging_components.clone(), log: log.clone(), }); @@ -740,7 +742,7 @@ where runtime_context .executor - .spawn_without_exit(async move { server.await }, "http-metrics"); + .spawn_without_exit(server, "http-metrics"); Some(listen_addr) } else { diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 1148f063d8..cc982aee08 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -7,7 +7,6 @@ edition = "2021" [dev-dependencies] eth1_test_rig = { path = "../../testing/eth1_test_rig" } serde_yaml = "0.8.13" -web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } sloggers = { version = "2.1.1", features = ["json"] } environment = { path = "../../lighthouse/environment" } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index cd680478cc..505e4a4796 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -2,7 +2,7 @@ use environment::{Environment, EnvironmentBuilder}; use eth1::{Config, Eth1Endpoint, Service}; use eth1::{DepositCache, DEFAULT_CHAIN_ID}; -use eth1_test_rig::GanacheEth1Instance; +use eth1_test_rig::{AnvilEth1Instance, Http, Middleware, Provider}; use 
execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; use merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; @@ -12,7 +12,6 @@ use std::ops::Range; use std::time::Duration; use tree_hash::TreeHash; use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature}; -use web3::{transports::Http, Web3}; const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32; @@ -53,7 +52,7 @@ fn random_deposit_data() -> DepositData { /// Blocking operation to get the deposit logs from the `deposit_contract`. async fn blocking_deposit_logs( client: &HttpJsonRpc, - eth1: &GanacheEth1Instance, + eth1: &AnvilEth1Instance, range: Range, ) -> Vec { client @@ -65,7 +64,7 @@ async fn blocking_deposit_logs( /// Blocking operation to get the deposit root from the `deposit_contract`. async fn blocking_deposit_root( client: &HttpJsonRpc, - eth1: &GanacheEth1Instance, + eth1: &AnvilEth1Instance, block_number: u64, ) -> Option { client @@ -77,7 +76,7 @@ async fn blocking_deposit_root( /// Blocking operation to get the deposit count from the `deposit_contract`. 
async fn blocking_deposit_count( client: &HttpJsonRpc, - eth1: &GanacheEth1Instance, + eth1: &AnvilEth1Instance, block_number: u64, ) -> Option { client @@ -86,16 +85,16 @@ async fn blocking_deposit_count( .expect("should get deposit count") } -async fn get_block_number(web3: &Web3) -> u64 { - web3.eth() - .block_number() +async fn get_block_number(client: &Provider) -> u64 { + client + .get_block_number() .await .map(|v| v.as_u64()) .expect("should get block number") } -async fn new_ganache_instance() -> Result { - GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await +async fn new_anvil_instance() -> Result { + AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await } mod eth1_cache { @@ -108,13 +107,13 @@ mod eth1_cache { let log = null_logger(); for follow_distance in 0..3 { - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let initial_block_number = get_block_number(&web3).await; + let initial_block_number = get_block_number(&anvil_client).await; let config = Config { endpoint: Eth1Endpoint::NoAuth( @@ -146,7 +145,7 @@ mod eth1_cache { }; for _ in 0..blocks { - eth1.ganache.evm_mine().await.expect("should mine block"); + eth1.anvil.evm_mine().await.expect("should mine block"); } service @@ -189,11 +188,11 @@ mod eth1_cache { async { let log = null_logger(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let cache_len = 4; @@ -203,7 +202,7 @@ mod eth1_cache { SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), ), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&web3).await, + lowest_cached_block_number: 
get_block_number(&anvil_client).await, follow_distance: 0, block_cache_truncation: Some(cache_len), ..Config::default() @@ -216,7 +215,7 @@ mod eth1_cache { let blocks = cache_len * 2; for _ in 0..blocks { - eth1.ganache.evm_mine().await.expect("should mine block") + eth1.anvil.evm_mine().await.expect("should mine block") } service @@ -244,11 +243,11 @@ mod eth1_cache { async { let log = null_logger(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let cache_len = 4; @@ -258,7 +257,7 @@ mod eth1_cache { SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), ), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&web3).await, + lowest_cached_block_number: get_block_number(&anvil_client).await, follow_distance: 0, block_cache_truncation: Some(cache_len), ..Config::default() @@ -270,7 +269,7 @@ mod eth1_cache { for _ in 0..4u8 { for _ in 0..cache_len / 2 { - eth1.ganache.evm_mine().await.expect("should mine block") + eth1.anvil.evm_mine().await.expect("should mine block") } service .update_deposit_cache(None) @@ -298,11 +297,11 @@ mod eth1_cache { let n = 16; - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let service = Service::new( Config { @@ -310,7 +309,7 @@ mod eth1_cache { SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), ), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&web3).await, + lowest_cached_block_number: get_block_number(&anvil_client).await, follow_distance: 0, ..Config::default() }, @@ -320,7 +319,7 @@ mod eth1_cache { .unwrap(); for _ in 0..n { - 
eth1.ganache.evm_mine().await.expect("should mine block") + eth1.anvil.evm_mine().await.expect("should mine block") } futures::try_join!( @@ -341,6 +340,7 @@ mod eth1_cache { } mod deposit_tree { + use super::*; #[tokio::test] @@ -350,13 +350,13 @@ mod deposit_tree { let n = 4; - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let start_block = get_block_number(&web3).await; + let start_block = get_block_number(&anvil_client).await; let service = Service::new( Config { @@ -431,13 +431,13 @@ mod deposit_tree { let n = 8; - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let start_block = get_block_number(&web3).await; + let start_block = get_block_number(&anvil_client).await; let service = Service::new( Config { @@ -484,11 +484,12 @@ mod deposit_tree { let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let mut deposit_roots = vec![]; let mut deposit_counts = vec![]; @@ -502,7 +503,7 @@ mod deposit_tree { .deposit(deposit.clone()) .await .expect("should perform a deposit"); - let block_number = get_block_number(&web3).await; + let block_number = get_block_number(&anvil_client).await; deposit_roots.push( blocking_deposit_root(&client, ð1, block_number) .await @@ -518,7 +519,7 @@ mod deposit_tree { let mut tree = DepositCache::default(); // Pull all the deposit logs from the contract. 
- let block_number = get_block_number(&web3).await; + let block_number = get_block_number(&anvil_client).await; let logs: Vec<_> = blocking_deposit_logs(&client, ð1, 0..block_number) .await .iter() @@ -593,15 +594,15 @@ mod http { #[tokio::test] async fn incrementing_deposits() { async { - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); - let block_number = get_block_number(&web3).await; + let block_number = get_block_number(&anvil_client).await; let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; assert_eq!(logs.len(), 0); @@ -616,10 +617,10 @@ mod http { ); for i in 1..=8 { - eth1.ganache + eth1.anvil .increase_time(1) .await - .expect("should be able to increase time on ganache"); + .expect("should be able to increase time on anvil"); deposit_contract .deposit(random_deposit_data()) @@ -627,7 +628,7 @@ mod http { .expect("should perform a deposit"); // Check the logs. 
- let block_number = get_block_number(&web3).await; + let block_number = get_block_number(&anvil_client).await; let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; assert_eq!(logs.len(), i, "the number of logs should be as expected"); @@ -690,13 +691,13 @@ mod fast { async { let log = null_logger(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let now = get_block_number(&web3).await; + let now = get_block_number(&anvil_client).await; let spec = MainnetEthSpec::default_spec(); let service = Service::new( Config { @@ -724,7 +725,7 @@ mod fast { .await .expect("should perform a deposit"); // Mine an extra block between deposits to test for corner cases - eth1.ganache.evm_mine().await.expect("should mine block"); + eth1.anvil.evm_mine().await.expect("should mine block"); } service @@ -737,7 +738,7 @@ mod fast { "should have imported n deposits" ); - for block_num in 0..=get_block_number(&web3).await { + for block_num in 0..=get_block_number(&anvil_client).await { let expected_deposit_count = blocking_deposit_count(&client, ð1, block_num).await; let expected_deposit_root = blocking_deposit_root(&client, ð1, block_num).await; @@ -773,13 +774,13 @@ mod persist { async { let log = null_logger(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let now = get_block_number(&web3).await; + let now = get_block_number(&anvil_client).await; let config = Config { endpoint: Eth1Endpoint::NoAuth( SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 3ed7ba65d6..2cb28346f5 100644 --- 
a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -23,7 +23,7 @@ bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" ethereum_ssz = "0.5.0" -ssz_types = "0.5.0" +ssz_types = "0.5.3" eth2 = { path = "../../common/eth2" } state_processing = { path = "../../consensus/state_processing" } superstruct = "0.6.0" @@ -50,3 +50,4 @@ keccak-hash = "0.10.0" hash256-std-hasher = "0.15.2" triehash = "0.8.4" hash-db = "0.15.2" +pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } \ No newline at end of file diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index e9b7dcc17f..c889fead0a 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -12,12 +12,13 @@ use types::{ }; impl ExecutionLayer { - /// Verify `payload.block_hash` locally within Lighthouse. + /// Calculate the block hash of an execution block. /// - /// No remote calls to the execution client will be made, so this is quite a cheap check. - pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef) -> Result<(), Error> { - let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); - + /// Return `(block_hash, transactions_root)`, where `transactions_root` is the root of the RLP + /// transactions. + pub fn calculate_execution_block_hash( + payload: ExecutionPayloadRef, + ) -> (ExecutionBlockHash, Hash256) { // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a // better alternative when one appears, possibly following Reth. @@ -46,7 +47,19 @@ impl ExecutionLayer { // Hash the RLP encoding of the block header. 
let rlp_block_header = rlp_encode_block_header(&exec_block_header); - let header_hash = ExecutionBlockHash::from_root(keccak256(&rlp_block_header)); + ( + ExecutionBlockHash::from_root(keccak256(&rlp_block_header)), + rlp_transactions_root, + ) + } + + /// Verify `payload.block_hash` locally within Lighthouse. + /// + /// No remote calls to the execution client will be made, so this is quite a cheap check. + pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); + + let (header_hash, rlp_transactions_root) = Self::calculate_execution_block_hash(payload); if header_hash != payload.block_hash() { return Err(Error::BlockHashMismatch { diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 4d2eb565e1..826294d5ff 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -10,6 +10,7 @@ pub use ethers_core::types::Transaction; use ethers_core::utils::rlp::{self, Decodable, Rlp}; use http::deposit_methods::RpcError; pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; +use pretty_reqwest_error::PrettyReqwestError; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use std::convert::TryFrom; @@ -32,7 +33,7 @@ pub type PayloadId = [u8; 8]; #[derive(Debug)] pub enum Error { - Reqwest(reqwest::Error), + HttpClient(PrettyReqwestError), Auth(auth::Error), BadResponse(String), RequestFailed(String), @@ -67,7 +68,7 @@ impl From for Error { ) { Error::Auth(auth::Error::InvalidToken) } else { - Error::Reqwest(e) + Error::HttpClient(e.into()) } } } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index ce413cb113..362f5b0b2b 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -238,6 +238,11 @@ impl Engine { 
**self.state.read().await == EngineStateInternal::Synced } + /// Returns `true` if the engine has a status other than synced or syncing. + pub async fn is_offline(&self) -> bool { + EngineState::from(**self.state.read().await) == EngineState::Offline + } + /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. pub async fn upcheck(&self) { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 16a7f3665f..0e9df7a50d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -222,6 +222,11 @@ struct Inner { builder_profit_threshold: Uint256, log: Logger, always_prefer_builder_payload: bool, + /// Track whether the last `newPayload` call errored. + /// + /// This is used *only* in the informational sync status endpoint, so that a VC using this + /// node can prefer another node with a healthier EL. + last_new_payload_errored: RwLock, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] @@ -350,6 +355,7 @@ impl ExecutionLayer { builder_profit_threshold: Uint256::from(builder_profit_threshold), log, always_prefer_builder_payload, + last_new_payload_errored: RwLock::new(false), }; Ok(Self { @@ -374,7 +380,7 @@ impl ExecutionLayer { /// Attempt to retrieve a full payload from the payload cache by the payload root pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { - self.inner.payload_cache.pop(root) + self.inner.payload_cache.get(root) } pub fn executor(&self) -> &TaskExecutor { @@ -542,6 +548,15 @@ impl ExecutionLayer { synced } + /// Return `true` if the execution layer is offline or returning errors on `newPayload`. + /// + /// This function should never be used to prevent any operation in the beacon node, but can + /// be used to give an indication on the HTTP API that the node's execution layer is struggling, + /// which can in turn be used by the VC. 
+ pub async fn is_offline_or_erroring(&self) -> bool { + self.engine().is_offline().await || *self.inner.last_new_payload_errored.read().await + } + /// Updates the proposer preparation data provided by validators pub async fn update_proposer_preparation( &self, @@ -811,16 +826,23 @@ impl ExecutionLayer { let relay_value = relay.data.message.value; let local_value = *local.block_value(); - if !self.inner.always_prefer_builder_payload - && local_value >= relay_value - { - info!( - self.log(), - "Local block is more profitable than relay block"; - "local_block_value" => %local_value, - "relay_value" => %relay_value - ); - return Ok(ProvenancedPayload::Local(local)); + if !self.inner.always_prefer_builder_payload { + if local_value >= relay_value { + info!( + self.log(), + "Local block is more profitable than relay block"; + "local_block_value" => %local_value, + "relay_value" => %relay_value + ); + return Ok(ProvenancedPayload::Local(local)); + } else { + info!( + self.log(), + "Relay block is more profitable than local block"; + "local_block_value" => %local_value, + "relay_value" => %relay_value + ); + } } match verify_builder_bid( @@ -1116,18 +1138,6 @@ impl ExecutionLayer { } /// Maps to the `engine_newPayload` JSON-RPC call. - /// - /// ## Fallback Behaviour - /// - /// The request will be broadcast to all nodes, simultaneously. It will await a response (or - /// failure) from all nodes and then return based on the first of these conditions which - /// returns true: - /// - /// - Error::ConsensusFailure if some nodes return valid and some return invalid - /// - Valid, if any nodes return valid. - /// - Invalid, if any nodes return invalid. - /// - Syncing, if any nodes return syncing. - /// - An error, if all nodes return an error. 
pub async fn notify_new_payload( &self, execution_payload: &ExecutionPayload, @@ -1156,12 +1166,18 @@ impl ExecutionLayer { &["new_payload", status.status.into()], ); } + *self.inner.last_new_payload_errored.write().await = result.is_err(); process_payload_status(execution_payload.block_hash(), result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) } + /// Update engine sync status. + pub async fn upcheck(&self) { + self.engine().upcheck().await; + } + /// Register that the given `validator_index` is going to produce a block at `slot`. /// /// The block will be built atop `head_block_root` and the EL will need to prepare an @@ -1221,18 +1237,6 @@ impl ExecutionLayer { } /// Maps to the `engine_consensusValidated` JSON-RPC call. - /// - /// ## Fallback Behaviour - /// - /// The request will be broadcast to all nodes, simultaneously. It will await a response (or - /// failure) from all nodes and then return based on the first of these conditions which - /// returns true: - /// - /// - Error::ConsensusFailure if some nodes return valid and some return invalid - /// - Valid, if any nodes return valid. - /// - Invalid, if any nodes return invalid. - /// - Syncing, if any nodes return syncing. - /// - An error, if all nodes return an error. 
pub async fn notify_forkchoice_updated( &self, head_block_hash: ExecutionBlockHash, diff --git a/beacon_node/execution_layer/src/payload_cache.rs b/beacon_node/execution_layer/src/payload_cache.rs index 60a8f2a95c..1722edff46 100644 --- a/beacon_node/execution_layer/src/payload_cache.rs +++ b/beacon_node/execution_layer/src/payload_cache.rs @@ -30,4 +30,8 @@ impl PayloadCache { pub fn pop(&self, root: &Hash256) -> Option> { self.payloads.lock().pop(&PayloadCacheId(*root)) } + + pub fn get(&self, hash: &Hash256) -> Option> { + self.payloads.lock().get(&PayloadCacheId(*hash)).cloned() + } } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index e3c58cfc27..79468b2116 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -30,7 +30,12 @@ pub async fn handle_rpc( .map_err(|s| (s, GENERIC_ERROR_CODE))?; match method { - ETH_SYNCING => Ok(JsonValue::Bool(false)), + ETH_SYNCING => ctx + .syncing_response + .lock() + .clone() + .map(JsonValue::Bool) + .map_err(|message| (message, GENERIC_ERROR_CODE)), ETH_GET_BLOCK_BY_NUMBER => { let tag = params .get(0) @@ -145,7 +150,9 @@ pub async fn handle_rpc( // Canned responses set by block hash take priority. if let Some(status) = ctx.get_new_payload_status(request.block_hash()) { - return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); + return status + .map(|status| serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()) + .map_err(|message| (message, GENERIC_ERROR_CODE)); } let (static_response, should_import) = @@ -320,11 +327,15 @@ pub async fn handle_rpc( // Canned responses set by block hash take priority. 
if let Some(status) = ctx.get_fcu_payload_status(&head_block_hash) { - let response = JsonForkchoiceUpdatedV1Response { - payload_status: JsonPayloadStatusV1::from(status), - payload_id: None, - }; - return Ok(serde_json::to_value(response).unwrap()); + return status + .map(|status| { + let response = JsonForkchoiceUpdatedV1Response { + payload_status: JsonPayloadStatusV1::from(status), + payload_id: None, + }; + serde_json::to_value(response).unwrap() + }) + .map_err(|message| (message, GENERIC_ERROR_CODE)); } let mut response = ctx diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 9379a3c238..a8e7bab270 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -126,6 +126,7 @@ impl MockServer { hook: <_>::default(), new_payload_statuses: <_>::default(), fcu_payload_statuses: <_>::default(), + syncing_response: Arc::new(Mutex::new(Ok(false))), engine_capabilities: Arc::new(RwLock::new(DEFAULT_ENGINE_CAPABILITIES)), _phantom: PhantomData, }); @@ -414,14 +415,25 @@ impl MockServer { self.ctx .new_payload_statuses .lock() - .insert(block_hash, status); + .insert(block_hash, Ok(status)); } pub fn set_fcu_payload_status(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) { self.ctx .fcu_payload_statuses .lock() - .insert(block_hash, status); + .insert(block_hash, Ok(status)); + } + + pub fn set_new_payload_error(&self, block_hash: ExecutionBlockHash, error: String) { + self.ctx + .new_payload_statuses + .lock() + .insert(block_hash, Err(error)); + } + + pub fn set_syncing_response(&self, res: Result) { + *self.ctx.syncing_response.lock() = res; } } @@ -478,8 +490,11 @@ pub struct Context { // // This is a more flexible and less stateful alternative to `static_new_payload_response` // and `preloaded_responses`. 
- pub new_payload_statuses: Arc>>, - pub fcu_payload_statuses: Arc>>, + pub new_payload_statuses: + Arc>>>, + pub fcu_payload_statuses: + Arc>>>, + pub syncing_response: Arc>>, pub engine_capabilities: Arc>, pub _phantom: PhantomData, @@ -489,14 +504,14 @@ impl Context { pub fn get_new_payload_status( &self, block_hash: &ExecutionBlockHash, - ) -> Option { + ) -> Option> { self.new_payload_statuses.lock().get(block_hash).cloned() } pub fn get_fcu_payload_status( &self, block_hash: &ExecutionBlockHash, - ) -> Option { + ) -> Option> { self.fcu_payload_statuses.lock().get(block_hash).cloned() } } diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index aaf6a7bea1..f99fcb55bf 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -1,11 +1,11 @@ -//! NOTE: These tests will not pass unless ganache is running on `ENDPOINT` (see below). +//! NOTE: These tests will not pass unless an anvil is running on `ENDPOINT` (see below). //! -//! You can start a suitable instance using the `ganache_test_node.sh` script in the `scripts` +//! You can start a suitable instance using the `anvil_test_node.sh` script in the `scripts` //! dir in the root of the `lighthouse` repo. 
#![cfg(test)] use environment::{Environment, EnvironmentBuilder}; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; -use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance}; +use eth1_test_rig::{AnvilEth1Instance, DelayThenDeposit, Middleware}; use genesis::{Eth1Config, Eth1GenesisService}; use sensitive_url::SensitiveUrl; use state_processing::is_valid_genesis_state; @@ -29,15 +29,14 @@ fn basic() { let mut spec = env.eth2_config().spec.clone(); env.runtime().block_on(async { - let eth1 = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()) + let eth1 = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()) .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let client = eth1.json_rpc_client(); - let now = web3 - .eth() - .block_number() + let now = client + .get_block_number() .await .map(|v| v.as_u64()) .expect("should get block number"); @@ -89,7 +88,7 @@ fn basic() { .map(|(_, state)| state) .expect("should finish waiting for genesis"); - // Note: using ganache these deposits are 1-per-block, therefore we know there should only be + // Note: using anvil these deposits are 1-per-block, therefore we know there should only be // the minimum number of validators. 
assert_eq!( state.validators().len(), diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 8f253e2f24..2b117b26ce 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -36,11 +36,11 @@ tree_hash = "0.5.0" sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } +logging = { path = "../../common/logging" } ethereum_serde_utils = "0.5.0" operation_pool = { path = "../operation_pool" } sensitive_url = { path = "../../common/sensitive_url" } unused_port = {path = "../../common/unused_port"} -logging = { path = "../../common/logging" } store = { path = "../store" } [dev-dependencies] @@ -51,4 +51,4 @@ genesis = { path = "../genesis" } [[test]] name = "bn_http_api_tests" -path = "tests/main.rs" +path = "tests/main.rs" \ No newline at end of file diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 828be8e576..299bc019c4 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -49,7 +49,7 @@ pub fn get_block_rewards( .map_err(beacon_chain_error)?; state - .build_all_caches(&chain.spec) + .build_caches(&chain.spec) .map_err(beacon_state_error)?; let mut reward_cache = Default::default(); diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 096d99f3f1..27bcc4d8a1 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -31,15 +31,18 @@ use beacon_chain::{ pub use block_id::BlockId; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ - self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SkipRandaoVerification, - ValidatorId, ValidatorStatus, + self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, + SkipRandaoVerification, ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; 
use lighthouse_version::version_with_platform; +use logging::SSELoggingComponents; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; -use publish_blocks::ProvenancedBlock; +pub use publish_blocks::{ + publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock, +}; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -108,6 +111,7 @@ pub struct Context { pub network_senders: Option>, pub network_globals: Option>>, pub eth1_service: Option, + pub sse_logging_components: Option, pub log: Logger, } @@ -322,6 +326,7 @@ pub fn serve( }; let eth_v1 = single_version(V1); + let eth_v2 = single_version(V2); // Create a `warp` filter that provides access to the network globals. let inner_network_globals = ctx.network_globals.clone(); @@ -448,6 +453,9 @@ pub fn serve( let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); + let inner_components = ctx.sse_logging_components.clone(); + let sse_component_filter = warp::any().map(move || inner_components.clone()); + // Create a `warp` filter that provides access to local system information. 
let system_info = Arc::new(RwLock::new(sysinfo::System::new())); { @@ -1217,16 +1225,55 @@ pub fn serve( log: Logger| async move { publish_blocks::publish_block( None, - ProvenancedBlock::Local(block), + ProvenancedBlock::local(block), chain, &network_tx, log, + BroadcastValidation::default(), ) .await .map(|()| warp::reply().into_response()) }, ); + let post_beacon_blocks_v2 = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: api_types::BroadcastValidationQuery, + block: Arc>, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + match publish_blocks::publish_block( + None, + ProvenancedBlock::local(block), + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } + }, + ); + /* * beacon/blocks */ @@ -1245,9 +1292,52 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - publish_blocks::publish_blinded_block(block, chain, &network_tx, log) - .await - .map(|()| warp::reply().into_response()) + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }, + ); + + let post_beacon_blinded_blocks_v2 = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: 
api_types::BroadcastValidationQuery, + block: SignedBeaconBlock>, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + match publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } }, ); @@ -2285,28 +2375,40 @@ pub fn serve( .and(chain_filter.clone()) .and_then( |network_globals: Arc>, chain: Arc>| { - blocking_json_task(move || { - let head_slot = chain.canonical_head.cached_head().head_slot(); - let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { - warp_utils::reject::custom_server_error("Unable to read slot clock".into()) - })?; - - // Taking advantage of saturating subtraction on slot. - let sync_distance = current_slot - head_slot; - - let is_optimistic = chain - .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; - - let syncing_data = api_types::SyncingData { - is_syncing: network_globals.sync_state.read().is_syncing(), - is_optimistic: Some(is_optimistic), - head_slot, - sync_distance, + async move { + let el_offline = if let Some(el) = &chain.execution_layer { + el.is_offline_or_erroring().await + } else { + true }; - Ok(api_types::GenericResponse::from(syncing_data)) - }) + blocking_json_task(move || { + let head_slot = chain.canonical_head.cached_head().head_slot(); + let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error( + "Unable to read slot clock".into(), + ) + })?; + + // Taking advantage of saturating subtraction on slot. 
+ let sync_distance = current_slot - head_slot; + + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + + let syncing_data = api_types::SyncingData { + is_syncing: network_globals.sync_state.read().is_syncing(), + is_optimistic: Some(is_optimistic), + el_offline: Some(el_offline), + head_slot, + sync_distance, + }; + + Ok(api_types::GenericResponse::from(syncing_data)) + }) + .await + } }, ); @@ -2316,24 +2418,41 @@ pub fn serve( .and(warp::path("health")) .and(warp::path::end()) .and(network_globals.clone()) - .and_then(|network_globals: Arc>| { - blocking_response_task(move || match *network_globals.sync_state.read() { - SyncState::SyncingFinalized { .. } - | SyncState::SyncingHead { .. } - | SyncState::SyncTransition - | SyncState::BackFillSyncing { .. } => Ok(warp::reply::with_status( - warp::reply(), - warp::http::StatusCode::PARTIAL_CONTENT, - )), - SyncState::Synced => Ok(warp::reply::with_status( - warp::reply(), - warp::http::StatusCode::OK, - )), - SyncState::Stalled => Err(warp_utils::reject::not_synced( - "sync stalled, beacon chain may not yet be initialized.".to_string(), - )), - }) - }); + .and(chain_filter.clone()) + .and_then( + |network_globals: Arc>, chain: Arc>| { + async move { + let el_offline = if let Some(el) = &chain.execution_layer { + el.is_offline_or_erroring().await + } else { + true + }; + + blocking_response_task(move || { + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + + let is_syncing = !network_globals.sync_state.read().is_synced(); + + if el_offline { + Err(warp_utils::reject::not_synced("execution layer is offline".to_string())) + } else if is_syncing || is_optimistic { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::PARTIAL_CONTENT, + )) + } else { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::OK, + )) + } + }) + .await + } + }, + ); // GET 
node/peers/{peer_id} let get_node_peers_by_id = eth_v1 @@ -2826,7 +2945,7 @@ pub fn serve( // It's reasonably likely that two different validators produce // identical aggregates, especially if they're using the same beacon // node. - Err(AttnError::AttestationAlreadyKnown(_)) => continue, + Err(AttnError::AttestationSupersetKnown(_)) => continue, // If we've already seen this aggregator produce an aggregate, just // skip this one. // @@ -3717,6 +3836,44 @@ pub fn serve( }, ); + // Subscribe to logs via Server Side Events + // /lighthouse/logs + let lighthouse_log_events = warp::path("lighthouse") + .and(warp::path("logs")) + .and(warp::path::end()) + .and(sse_component_filter) + .and_then(|sse_component: Option| { + blocking_response_task(move || { + if let Some(logging_components) = sse_component { + // Build a JSON stream + let s = + BroadcastStream::new(logging_components.sender.subscribe()).map(|msg| { + match msg { + Ok(data) => { + // Serialize to json + match data.to_json_string() { + // Send the json as a Server Side Event + Ok(json) => Ok(Event::default().data(json)), + Err(e) => Err(warp_utils::reject::server_sent_event_error( + format!("Unable to serialize to JSON {}", e), + )), + } + } + Err(e) => Err(warp_utils::reject::server_sent_event_error( + format!("Unable to receive event {}", e), + )), + } + }); + + Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s))) + } else { + Err(warp_utils::reject::custom_server_error( + "SSE Logging is not enabled".to_string(), + )) + } + }) + }); + // Define the ultimate set of routes that will be provided to the server. // Use `uor` rather than `or` in order to simplify types (see `UnifyingOrFilter`). 
let routes = warp::get() @@ -3784,6 +3941,7 @@ pub fn serve( .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) + .uor(lighthouse_log_events.boxed()) .recover(warp_utils::reject::handle_rejection), ) .boxed() @@ -3791,6 +3949,8 @@ pub fn serve( warp::post().and( post_beacon_blocks .uor(post_beacon_blinded_blocks) + .uor(post_beacon_blocks_v2) + .uor(post_beacon_blinded_blocks_v2) .uor(post_beacon_pool_attestations) .uor(post_beacon_pool_attester_slashings) .uor(post_beacon_pool_proposer_slashings) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 1a5d5175bc..0f2f7b361c 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,13 +1,16 @@ use crate::metrics; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ - BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, + BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, + NotifyExecutionLayer, }; +use eth2::types::BroadcastValidation; use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; +use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; @@ -18,49 +21,114 @@ use types::{ }; use warp::Rejection; -pub enum ProvenancedBlock { +pub enum ProvenancedBlock> { /// The payload was built using a local EE. - Local(Arc>>), + Local(B, PhantomData), /// The payload was build using a remote builder (e.g., via a mev-boost /// compatible relay). 
- Builder(Arc>>), + Builder(B, PhantomData), +} + +impl> ProvenancedBlock { + pub fn local(block: B) -> Self { + Self::Local(block, PhantomData) + } + + pub fn builder(block: B) -> Self { + Self::Builder(block, PhantomData) + } } /// Handles a request from the HTTP API for full blocks. -pub async fn publish_block( +pub async fn publish_block>( block_root: Option, - provenanced_block: ProvenancedBlock, + provenanced_block: ProvenancedBlock, chain: Arc>, network_tx: &UnboundedSender>, log: Logger, + validation_level: BroadcastValidation, ) -> Result<(), Rejection> { let seen_timestamp = timestamp_now(); let (block, is_locally_built_block) = match provenanced_block { - ProvenancedBlock::Local(block) => (block, true), - ProvenancedBlock::Builder(block) => (block, false), + ProvenancedBlock::Local(block, _) => (block, true), + ProvenancedBlock::Builder(block, _) => (block, false), }; - let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + let beacon_block = block.inner(); + let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock); + debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot()); - debug!( - log, - "Signed block published to HTTP API"; - "slot" => block.slot() - ); + /* actually publish a block */ + let publish_block = move |block: Arc>, + sender, + log, + seen_timestamp| { + let publish_timestamp = timestamp_now(); + let publish_delay = publish_timestamp + .checked_sub(seen_timestamp) + .unwrap_or_else(|| Duration::from_secs(0)); - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. 
+ info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay); - let message = PubsubMessage::BeaconBlock(block.clone()); - crate::publish_pubsub_message(network_tx, message)?; + let message = PubsubMessage::BeaconBlock(block); + crate::publish_pubsub_message(&sender, message) + .map_err(|_| BeaconChainError::UnableToPublish.into()) + }; - let block_root = block_root.unwrap_or_else(|| block.canonical_root()); + /* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */ + let gossip_verified_block = block.into_gossip_verified_block(&chain).map_err(|e| { + warn!(log, "Not publishing block, not gossip verified"; "slot" => beacon_block.slot(), "error" => ?e); + warp_utils::reject::custom_bad_request(e.to_string()) + })?; + + let block_root = block_root.unwrap_or(gossip_verified_block.block_root); + + if let BroadcastValidation::Gossip = validation_level { + publish_block( + beacon_block.clone(), + network_tx.clone(), + log.clone(), + seen_timestamp, + ) + .map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?; + } + + /* only publish if gossip- and consensus-valid and equivocation-free */ + let chain_clone = chain.clone(); + let block_clone = beacon_block.clone(); + let log_clone = log.clone(); + let sender_clone = network_tx.clone(); + + let publish_fn = move || match validation_level { + BroadcastValidation::Gossip => Ok(()), + BroadcastValidation::Consensus => { + publish_block(block_clone, sender_clone, log_clone, seen_timestamp) + } + BroadcastValidation::ConsensusAndEquivocation => { + if chain_clone + .observed_block_producers + .read() + .proposer_has_been_observed(block_clone.message(), block_root) + .map_err(|e| BlockError::BeaconChainError(e.into()))? 
+ .is_slashable() + { + warn!( + log_clone, + "Not publishing equivocating block"; + "slot" => block_clone.slot() + ); + Err(BlockError::Slashable) + } else { + publish_block(block_clone, sender_clone, log_clone, seen_timestamp) + } + } + }; match chain .process_block( block_root, - block.clone(), - CountUnrealized::True, + gossip_verified_block, NotifyExecutionLayer::Yes, + publish_fn, ) .await { @@ -70,14 +138,14 @@ pub async fn publish_block( "Valid block from HTTP API"; "block_delay" => ?delay, "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), - "slot" => block.slot(), + "proposer_index" => beacon_block.message().proposer_index(), + "slot" => beacon_block.slot(), ); // Notify the validator monitor. chain.validator_monitor.read().register_api_block( seen_timestamp, - block.message(), + beacon_block.message(), root, &chain.slot_clock, ); @@ -90,40 +158,44 @@ pub async fn publish_block( // blocks built with builders we consider the broadcast time to be // when the blinded block is published to the builder. if is_locally_built_block { - late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log) + late_block_logging( + &chain, + seen_timestamp, + beacon_block.message(), + root, + "local", + &log, + ) } Ok(()) } - Err(BlockError::BlockIsAlreadyKnown) => { - info!( - log, - "Block from HTTP API already known"; - "block" => ?block.canonical_root(), - "slot" => block.slot(), - ); - Ok(()) + Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { + Err(warp_utils::reject::custom_server_error( + "unable to publish to network channel".to_string(), + )) } - Err(BlockError::RepeatProposal { proposer, slot }) => { - warn!( - log, - "Block ignored due to repeat proposal"; - "msg" => "this can happen when a VC uses fallback BNs. 
\ - whilst this is not necessarily an error, it can indicate issues with a BN \ - or between the VC and BN.", - "slot" => slot, - "proposer" => proposer, - ); + Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request( + "proposal for this slot and proposer has already been seen".to_string(), + )), + Err(BlockError::BlockIsAlreadyKnown) => { + info!(log, "Block from HTTP API already known"; "block" => ?block_root); Ok(()) } Err(e) => { - let msg = format!("{:?}", e); - error!( - log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) + if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(format!("{e}"))) + } else { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::custom_bad_request(format!( + "Invalid block: {e}" + ))) + } } } } @@ -135,21 +207,31 @@ pub async fn publish_blinded_block( chain: Arc>, network_tx: &UnboundedSender>, log: Logger, + validation_level: BroadcastValidation, ) -> Result<(), Rejection> { let block_root = block.canonical_root(); - let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; - publish_block::(Some(block_root), full_block, chain, network_tx, log).await + let full_block: ProvenancedBlock>> = + reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; + publish_block::( + Some(block_root), + full_block, + chain, + network_tx, + log, + validation_level, + ) + .await } /// Deconstruct the given blinded block, and construct a full block. This attempts to use the /// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve /// the full payload. 
-async fn reconstruct_block( +pub async fn reconstruct_block( chain: Arc>, block_root: Hash256, block: SignedBeaconBlock>, log: Logger, -) -> Result, Rejection> { +) -> Result>>, Rejection> { let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) @@ -215,15 +297,15 @@ async fn reconstruct_block( None => block .try_into_full_block(None) .map(Arc::new) - .map(ProvenancedBlock::Local), + .map(ProvenancedBlock::local), Some(ProvenancedPayload::Local(full_payload)) => block .try_into_full_block(Some(full_payload)) .map(Arc::new) - .map(ProvenancedBlock::Local), + .map(ProvenancedBlock::local), Some(ProvenancedPayload::Builder(full_payload)) => block .try_into_full_block(Some(full_payload)) .map(Arc::new) - .map(ProvenancedBlock::Builder), + .map(ProvenancedBlock::builder), } .ok_or_else(|| { warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index a6acf308fa..07dfb5c988 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -199,10 +199,14 @@ pub fn process_sync_committee_signatures( Err(SyncVerificationError::PriorSyncCommitteeMessageKnown { validator_index, slot, + prev_root, + new_root, }) => { debug!( log, "Ignoring already-known sync message"; + "new_root" => ?new_root, + "prev_root" => ?prev_root, "slot" => slot, "validator_index" => validator_index, ); @@ -300,7 +304,7 @@ pub fn process_signed_contribution_and_proofs( } // If we already know the contribution, don't broadcast it or attempt to // further verify it. Return success. 
- Err(SyncVerificationError::SyncContributionAlreadyKnown(_)) => continue, + Err(SyncVerificationError::SyncContributionSupersetKnown(_)) => continue, // If we've already seen this aggregator produce an aggregate, just // skip this one. // diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 8dc9be7dd4..9880a8ca61 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -198,6 +198,7 @@ pub async fn create_api_server_on_port( network_senders: Some(network_senders), network_globals: Some(network_globals), eth1_service: Some(eth1_service), + sse_logging_components: None, log, }); diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs new file mode 100644 index 0000000000..4819dd99e7 --- /dev/null +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -0,0 +1,1270 @@ +use beacon_chain::{ + test_utils::{AttestationStrategy, BlockStrategy}, + GossipVerifiedBlock, +}; +use eth2::types::{BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock}; +use http_api::test_utils::InteractiveTester; +use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; +use tree_hash::TreeHash; +use types::{Hash256, MainnetEthSpec, Slot}; +use warp::Rejection; +use warp_utils::reject::CustomBadRequest; + +use eth2::reqwest::StatusCode; + +type E = MainnetEthSpec; + +/* + * We have the following test cases, which are duplicated for the blinded variant of the route: + * + * - `broadcast_validation=gossip` + * - Invalid (400) + * - Full Pass (200) + * - Partial Pass (202) + * - `broadcast_validation=consensus` + * - Invalid (400) + * - Only gossip (400) + * - Only consensus pass (i.e., equivocates) (200) + * - Full pass (200) + * - `broadcast_validation=consensus_and_equivocation` + * - Invalid (400) + * - Invalid due to early equivocation (400) + * - Only gossip (400) + * - Only 
consensus (400) + * - Pass (200) + * + */ + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_partial_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::random() + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response = response.unwrap_err(); + + assert_eq!(error_response.status(), Some(StatusCode::ACCEPTED)); +} + +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_invalid() { + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective, but nonetheless equivocates, is accepted when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_partial_pass_only_consensus() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + let test_logger = tester.harness.logger().clone(); + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let (block_b, state_after_b): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a, slot_b).await; + + /* check for `make_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); + assert!(gossip_block_a.is_err()); + + /* submit `block_b` which should induce equivocation */ + let channel = tokio::sync::mpsc::unbounded_channel(); + + let publication_result: Result<(), Rejection> = publish_block( + None, + ProvenancedBlock::local(gossip_block_b.unwrap()), + tester.harness.chain.clone(), + &channel.0, + test_logger, + validation_level.unwrap(), + ) + .await; + + assert!(publication_result.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block_b.canonical_root())); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_consensus_early_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let (block_b, state_after_b): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a, slot_b).await; + + /* check for `make_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + /* submit `block_a` as valid */ + assert!(tester + .client + .post_beacon_blocks_v2(&block_a, validation_level) + .await + .is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root())); + + /* submit `block_b` which should induce equivocation */ + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block_b, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. +/// +/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. 
This is in order to handle the late equivocation case. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_consensus_late_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + let test_logger = tester.harness.logger().clone(); + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let (block_b, state_after_b): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a, slot_b).await; + + /* check for `make_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); + assert!(gossip_block_a.is_err()); + + let channel = tokio::sync::mpsc::unbounded_channel(); + + let publication_result: Result<(), Rejection> = publish_block( + None, + ProvenancedBlock::local(gossip_block_b.unwrap()), + tester.harness.chain, + &channel.0, + test_logger, + validation_level.unwrap(), + ) + .await; + + 
assert!(publication_result.is_err()); + + let publication_error = publication_result.unwrap_err(); + + assert!(publication_error.find::().is_some()); + + assert_eq!( + *publication_error.find::().unwrap().0, + "proposal for this slot and proposer has already been seen".to_string() + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective (and does not equivocate) is accepted when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=gossip`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_partial_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero() + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response = response.unwrap_err(); + + assert_eq!(error_response.status(), Some(StatusCode::ACCEPTED)); +} + +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. 
+ let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_consensus_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_consensus_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_consensus_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_consensus_early_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + .harness + .make_blinded_block(state_a.clone(), slot_b) + .await; + let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + /* check for `make_blinded_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + /* submit `block_a` as valid */ + assert!(tester + .client + .post_beacon_blinded_blocks_v2(&block_a, validation_level) + .await + .is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root())); + + /* submit `block_b` which should induce equivocation */ + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block_b, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. 
+/// +/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_consensus_late_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + let test_logger = tester.harness.logger().clone(); + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + .harness + .make_blinded_block(state_a.clone(), slot_b) + .await; + let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + /* check for `make_blinded_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + let unblinded_block_a = reconstruct_block( + tester.harness.chain.clone(), + block_a.state_root(), + block_a, + test_logger.clone(), + ) + .await + .unwrap(); + let unblinded_block_b = reconstruct_block( + tester.harness.chain.clone(), + block_b.clone().state_root(), + block_b.clone(), + test_logger.clone(), + ) + .await + .unwrap(); + + let 
inner_block_a = match unblinded_block_a { + ProvenancedBlock::Local(a, _) => a, + ProvenancedBlock::Builder(a, _) => a, + }; + let inner_block_b = match unblinded_block_b { + ProvenancedBlock::Local(b, _) => b, + ProvenancedBlock::Builder(b, _) => b, + }; + + let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain); + assert!(gossip_block_a.is_err()); + + let channel = tokio::sync::mpsc::unbounded_channel(); + + let publication_result: Result<(), Rejection> = publish_blinded_block( + block_b, + tester.harness.chain, + &channel.0, + test_logger, + validation_level.unwrap(), + ) + .await; + + assert!(publication_result.is_err()); + + let publication_error: Rejection = publication_result.unwrap_err(); + + assert!(publication_error.find::().is_some()); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective (and does not equivocate) is accepted when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs index 342b72cc7d..e0636424e4 100644 --- a/beacon_node/http_api/tests/main.rs +++ b/beacon_node/http_api/tests/main.rs @@ -1,5 +1,7 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. +pub mod broadcast_validation_tests; pub mod fork_tests; pub mod interactive_tests; +pub mod status_tests; pub mod tests; diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs new file mode 100644 index 0000000000..95f885faa5 --- /dev/null +++ b/beacon_node/http_api/tests/status_tests.rs @@ -0,0 +1,225 @@ +//! Tests related to the beacon node's sync status +use beacon_chain::{ + test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, + BlockError, +}; +use eth2::StatusCode; +use execution_layer::{PayloadStatusV1, PayloadStatusV1Status}; +use http_api::test_utils::InteractiveTester; +use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot}; + +type E = MinimalEthSpec; + +/// Create a new test environment that is post-merge with `chain_depth` blocks. +async fn post_merge_tester(chain_depth: u64, validator_count: u64) -> InteractiveTester { + // Test using latest fork so that we simulate conditions as similar to mainnet as possible. 
+ let mut spec = ForkName::latest().make_genesis_spec(E::default_spec()); + spec.terminal_total_difficulty = 1.into(); + + let tester = InteractiveTester::::new(Some(spec), validator_count as usize).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + let execution_ctx = mock_el.server.ctx.clone(); + + // Move to terminal block. + mock_el.server.all_payloads_valid(); + execution_ctx + .execution_block_generator + .write() + .move_to_terminal_block() + .unwrap(); + + // Create some chain depth. + harness.advance_slot(); + harness + .extend_chain_with_sync( + chain_depth as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::AllValidators, + ) + .await; + tester +} + +/// Check `syncing` endpoint when the EL is syncing. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn el_syncing_then_synced() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL syncing + mock_el.server.set_syncing_response(Ok(true)); + mock_el.el.upcheck().await; + + let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(false)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); + + // EL synced + mock_el.server.set_syncing_response(Ok(false)); + mock_el.el.upcheck().await; + + let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(false)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); +} + +/// Check `syncing` endpoint when the EL is offline (errors on upcheck). 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn el_offline() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL offline + mock_el.server.set_syncing_response(Err("offline".into())); + mock_el.el.upcheck().await; + + let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(true)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); +} + +/// Check `syncing` endpoint when the EL errors on newPaylod but is not fully offline. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn el_error_on_new_payload() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // Make a block. + let pre_state = harness.get_current_state(); + let (block, _) = harness + .make_block(pre_state, Slot::new(num_blocks + 1)) + .await; + let block_hash = block + .message() + .body() + .execution_payload() + .unwrap() + .block_hash(); + + // Make sure `newPayload` errors for the new block. + mock_el + .server + .set_new_payload_error(block_hash, "error".into()); + + // Attempt to process the block, which should error. + harness.advance_slot(); + assert!(matches!( + harness.process_block_result(block.clone()).await, + Err(BlockError::ExecutionPayloadError(_)) + )); + + // The EL should now be *offline* according to the API. 
+ let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(true)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); + + // Processing a block successfully should remove the status. + mock_el.server.set_new_payload_status( + block_hash, + PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: Some(block_hash), + validation_error: None, + }, + ); + harness.process_block_result(block).await.unwrap(); + + let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(false)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); +} + +/// Check `node health` endpoint when the EL is offline. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn node_health_el_offline() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL offline + mock_el.server.set_syncing_response(Err("offline".into())); + mock_el.el.upcheck().await; + + let status = tester.client.get_node_health().await; + match status { + Ok(_) => { + panic!("should return 503 error status code"); + } + Err(e) => { + assert_eq!(e.status().unwrap(), 503); + } + } +} + +/// Check `node health` endpoint when the EL is online and synced. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn node_health_el_online_and_synced() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL synced + mock_el.server.set_syncing_response(Ok(false)); + mock_el.el.upcheck().await; + + let status = tester.client.get_node_health().await; + match status { + Ok(response) => { + assert_eq!(response, StatusCode::OK); + } + Err(_) => { + panic!("should return 200 status code"); + } + } +} + +/// Check `node health` endpoint when the EL is online but not synced. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn node_health_el_online_and_not_synced() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL not synced + harness.advance_slot(); + mock_el.server.all_payloads_syncing(true); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let status = tester.client.get_node_health().await; + match status { + Ok(response) => { + assert_eq!(response, StatusCode::PARTIAL_CONTENT); + } + Err(_) => { + panic!("should return 206 status code"); + } + } +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index fc78b2a9bf..741ee1ffc0 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -8,7 +8,7 @@ use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, - BeaconNodeHttpClient, Error, StatusCode, Timeouts, + 
BeaconNodeHttpClient, Error, Timeouts, }; use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; @@ -159,7 +159,7 @@ impl ApiTester { // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness - .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1) .await; let head_state_root = head.beacon_state_root(); @@ -1248,14 +1248,23 @@ impl ApiTester { } pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { - let mut next_block = self.next_block.clone(); - *next_block.message_mut().proposer_index_mut() += 1; + let block = self + .harness + .make_block_with_modifier( + self.harness.get_current_state(), + self.harness.get_current_slot(), + |b| { + *b.state_root_mut() = Hash256::zero(); + }, + ) + .await + .0; - assert!(self.client.post_beacon_blocks(&next_block).await.is_err()); + assert!(self.client.post_beacon_blocks(&block).await.is_err()); assert!( self.network_rx.network_recv.recv().await.is_some(), - "invalid blocks should be sent to network" + "gossip valid blocks should be sent to network" ); self @@ -1721,6 +1730,8 @@ impl ApiTester { let expected = SyncingData { is_syncing: false, is_optimistic: Some(false), + // these tests run without the Bellatrix fork enabled + el_offline: Some(true), head_slot, sync_distance, }; @@ -1751,9 +1762,15 @@ impl ApiTester { } pub async fn test_get_node_health(self) -> Self { - let status = self.client.get_node_health().await.unwrap(); - assert_eq!(status, StatusCode::OK); - + let status = self.client.get_node_health().await; + match status { + Ok(_) => { + panic!("should return 503 error status code"); + } + Err(e) => { + assert_eq!(e.status().unwrap(), 503); + } + } self } @@ -4124,7 +4141,7 @@ impl ApiTester { .unwrap(); let expected_reorg = EventKind::ChainReorg(SseChainReorg { - slot: self.next_block.slot(), + 
slot: self.reorg_block.slot(), depth: 1, old_head_block: self.next_block.canonical_root(), old_head_state: self.next_block.state_root(), @@ -4134,6 +4151,8 @@ impl ApiTester { execution_optimistic: false, }); + self.harness.advance_slot(); + self.client .post_beacon_blocks(&self.reorg_block) .await diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index 89fde32374..b88a790afd 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -38,7 +38,7 @@ async fn returns_200_ok() { }; let (listening_socket, server) = http_metrics::serve(ctx, server_shutdown).unwrap(); - tokio::spawn(async { server.await }); + tokio::spawn(server); let url = format!( "http://{}:{}/metrics", diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index c1b4d72174..6d056d8350 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,10 +5,10 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -discv5 = { version = "0.2.2", features = ["libp2p"] } +discv5 = { version = "0.3.0", features = ["libp2p"]} unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } -ssz_types = "0.5.0" +ssz_types = "0.5.3" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" ethereum_ssz = "0.5.0" @@ -60,4 +60,4 @@ quickcheck = "0.9.2" quickcheck_macros = "0.9.1" [features] -libp2p-websocket = [] +libp2p-websocket = [] \ No newline at end of file diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index f4b3b78d04..9467526458 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -1,5 +1,5 @@ use crate::listen_addr::{ListenAddr, ListenAddress}; -use crate::rpc::config::OutboundRateLimiterConfig; +use crate::rpc::config::{InboundRateLimiterConfig, 
OutboundRateLimiterConfig}; use crate::types::GossipKind; use crate::{Enr, PeerIdSerialized}; use directory::{ @@ -145,6 +145,12 @@ pub struct Config { /// Configuration for the outbound rate limiter (requests made by this node). pub outbound_rate_limiter_config: Option, + + /// Configures if/where invalid blocks should be stored. + pub invalid_block_storage: Option, + + /// Configuration for the inbound rate limiter (requests received by this node). + pub inbound_rate_limiter_config: Option, } impl Config { @@ -157,7 +163,7 @@ impl Config { udp_port, tcp_port, }); - self.discv5_config.ip_mode = discv5::IpMode::Ip4; + self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), udp_port); self.discv5_config.table_filter = |enr| enr.ip4().as_ref().map_or(false, is_global_ipv4) } @@ -170,9 +176,8 @@ impl Config { udp_port, tcp_port, }); - self.discv5_config.ip_mode = discv5::IpMode::Ip6 { - enable_mapped_addresses: false, - }; + + self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), udp_port); self.discv5_config.table_filter = |enr| enr.ip6().as_ref().map_or(false, is_global_ipv6) } @@ -200,10 +205,10 @@ impl Config { tcp_port: tcp6_port, }, ); + self.discv5_config.listen_config = discv5::ListenConfig::default() + .with_ipv4(v4_addr, udp4_port) + .with_ipv6(v6_addr, udp6_port); - self.discv5_config.ip_mode = discv5::IpMode::Ip6 { - enable_mapped_addresses: true, - }; self.discv5_config.table_filter = |enr| match (&enr.ip4(), &enr.ip6()) { (None, None) => false, (None, Some(ip6)) => is_global_ipv6(ip6), @@ -273,9 +278,17 @@ impl Default for Config { .build() .expect("The total rate limit has been specified"), ); + let listen_addresses = ListenAddress::V4(ListenAddr { + addr: Ipv4Addr::UNSPECIFIED, + udp_port: 9000, + tcp_port: 9000, + }); + + let discv5_listen_config = + discv5::ListenConfig::from_ip(Ipv4Addr::UNSPECIFIED.into(), 9000); // discv5 configuration - let discv5_config = Discv5ConfigBuilder::new() + let 
discv5_config = Discv5ConfigBuilder::new(discv5_listen_config) .enable_packet_filter() .session_cache_capacity(5000) .request_timeout(Duration::from_secs(1)) @@ -298,12 +311,9 @@ impl Default for Config { // NOTE: Some of these get overridden by the corresponding CLI default values. Config { network_dir, - listen_addresses: ListenAddress::V4(ListenAddr { - addr: Ipv4Addr::UNSPECIFIED, - udp_port: 9000, - tcp_port: 9000, - }), + listen_addresses, enr_address: (None, None), + enr_udp4_port: None, enr_tcp4_port: None, enr_udp6_port: None, @@ -329,6 +339,8 @@ impl Default for Config { metrics_enabled: false, enable_light_client_server: false, outbound_rate_limiter_config: None, + invalid_block_storage: None, + inbound_rate_limiter_config: None, } } } diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 938e7cfa25..f85c4b3e5c 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -213,13 +213,17 @@ pub fn build_enr( fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { // take preference over disk_enr address if one is not specified (local_enr.ip4().is_none() || local_enr.ip4() == disk_enr.ip4()) + && + (local_enr.ip6().is_none() || local_enr.ip6() == disk_enr.ip6()) // tcp ports must match && local_enr.tcp4() == disk_enr.tcp4() + && local_enr.tcp6() == disk_enr.tcp6() // must match on the same fork && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) - // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, + && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) + // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, // otherwise we use a new ENR. 
This will likely only be true for non-validating nodes && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index e9cca6667a..3df7f7c16f 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -198,7 +198,7 @@ impl CombinedKeyPublicExt for CombinedPublicKey { fn as_peer_id(&self) -> PeerId { match self { Self::Secp256k1(pk) => { - let pk_bytes = pk.to_bytes(); + let pk_bytes = pk.to_sec1_bytes(); let libp2p_pk = libp2p::core::PublicKey::Secp256k1( libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes) .expect("valid public key"), @@ -222,14 +222,16 @@ impl CombinedKeyExt for CombinedKey { match key { Keypair::Secp256k1(key) => { let secret = - discv5::enr::k256::ecdsa::SigningKey::from_bytes(&key.secret().to_bytes()) + discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes()) .expect("libp2p key must be valid"); Ok(CombinedKey::Secp256k1(secret)) } Keypair::Ed25519(key) => { - let ed_keypair = - discv5::enr::ed25519_dalek::SecretKey::from_bytes(&key.encode()[..32]) - .expect("libp2p key must be valid"); + let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes( + &(key.encode()[..32]) + .try_into() + .expect("libp2p key must be valid"), + ); Ok(CombinedKey::from(ed_keypair)) } Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"), @@ -281,7 +283,7 @@ mod tests { fn test_secp256k1_peer_id_conversion() { let sk_hex = "df94a73d528434ce2309abb19c16aedb535322797dbd59c157b1e04095900f48"; let sk_bytes = hex::decode(sk_hex).unwrap(); - let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_bytes(&sk_bytes).unwrap(); + let secret_key = 
discv5::enr::k256::ecdsa::SigningKey::from_slice(&sk_bytes).unwrap(); let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap(); let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into(); @@ -300,16 +302,18 @@ mod tests { fn test_ed25519_peer_conversion() { let sk_hex = "4dea8a5072119927e9d243a7d953f2f4bc95b70f110978e2f9bc7a9000e4b261"; let sk_bytes = hex::decode(sk_hex).unwrap(); - let secret = discv5::enr::ed25519_dalek::SecretKey::from_bytes(&sk_bytes).unwrap(); - let public = discv5::enr::ed25519_dalek::PublicKey::from(&secret); - let keypair = discv5::enr::ed25519_dalek::Keypair { secret, public }; + let secret_key = discv5::enr::ed25519_dalek::SigningKey::from_bytes( + &sk_bytes.clone().try_into().unwrap(), + ); let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap(); - let ed25519_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into(); - let libp2p_kp = Keypair::Ed25519(ed25519_kp); + let secp256k1_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into(); + let libp2p_kp = Keypair::Ed25519(secp256k1_kp); let peer_id = libp2p_kp.public().to_peer_id(); - let enr = discv5::enr::EnrBuilder::new("v4").build(&keypair).unwrap(); + let enr = discv5::enr::EnrBuilder::new("v4") + .build(&secret_key) + .unwrap(); let node_id = peer_id_to_node_id(&peer_id).unwrap(); assert_eq!(enr.node_id(), node_id); diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 13fdf8ed57..3ee74ebf01 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -209,13 +209,6 @@ impl Discovery { info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6() ); - let listen_socket = match 
config.listen_addrs() { - crate::listen_addr::ListenAddress::V4(v4_addr) => v4_addr.udp_socket_addr(), - crate::listen_addr::ListenAddress::V6(v6_addr) => v6_addr.udp_socket_addr(), - crate::listen_addr::ListenAddress::DualStack(_v4_addr, v6_addr) => { - v6_addr.udp_socket_addr() - } - }; // convert the keypair into an ENR key let enr_key: CombinedKey = CombinedKey::from_libp2p(local_key)?; @@ -251,10 +244,7 @@ impl Discovery { // Start the discv5 service and obtain an event stream let event_stream = if !config.disable_discovery { - discv5 - .start(listen_socket) - .map_err(|e| e.to_string()) - .await?; + discv5.start().map_err(|e| e.to_string()).await?; debug!(log, "Discovery service started"); EventStream::Awaiting(Box::pin(discv5.event_stream())) } else { @@ -413,7 +403,7 @@ impl Discovery { /// If the external address needs to be modified, use `update_enr_udp_socket. pub fn update_enr_tcp_port(&mut self, port: u16) -> Result<(), String> { self.discv5 - .enr_insert("tcp", &port.to_be_bytes()) + .enr_insert("tcp", &port) .map_err(|e| format!("{:?}", e))?; // replace the global version @@ -428,29 +418,12 @@ impl Discovery { /// This is with caution. Discovery should automatically maintain this. This should only be /// used when automatic discovery is disabled. 
pub fn update_enr_udp_socket(&mut self, socket_addr: SocketAddr) -> Result<(), String> { - match socket_addr { - SocketAddr::V4(socket) => { - self.discv5 - .enr_insert("ip", &socket.ip().octets()) - .map_err(|e| format!("{:?}", e))?; - self.discv5 - .enr_insert("udp", &socket.port().to_be_bytes()) - .map_err(|e| format!("{:?}", e))?; - } - SocketAddr::V6(socket) => { - self.discv5 - .enr_insert("ip6", &socket.ip().octets()) - .map_err(|e| format!("{:?}", e))?; - self.discv5 - .enr_insert("udp6", &socket.port().to_be_bytes()) - .map_err(|e| format!("{:?}", e))?; - } + const IS_TCP: bool = false; + if self.discv5.update_local_enr_socket(socket_addr, IS_TCP) { + // persist modified enr to disk + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); } - - // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); - // persist modified enr to disk - enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); Ok(()) } diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index e324532f7b..f79ff8daf6 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -1,4 +1,4 @@ -///! The subnet predicate used for searching for a particular subnet. +//! The subnet predicate used for searching for a particular subnet. 
use super::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use slog::trace; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index b2096013bf..c6c737caed 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1266,7 +1266,7 @@ impl PeerManager { ); } - let mut score_peers: &mut (f64, usize) = avg_score_per_client + let score_peers: &mut (f64, usize) = avg_score_per_client .entry(peer_info.client().kind.to_string()) .or_default(); score_peers.0 += peer_info.score().score(); diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 6c6ce2da32..d568f27897 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -214,8 +214,7 @@ mod tests { let mut buf = BytesMut::new(); buf.extend_from_slice(&message); - let snappy_protocol_id = - ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); + let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(ForkName::Base)); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( @@ -249,8 +248,7 @@ mod tests { // Insert length-prefix uvi_codec.encode(len, &mut dst).unwrap(); - let snappy_protocol_id = - ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); + let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(ForkName::Base)); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( @@ -277,8 +275,7 @@ mod tests { dst } - let protocol_id = - ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy); + let protocol_id = ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy); // Response 
limits let fork_context = Arc::new(fork_context(ForkName::Base)); diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 28fea40a20..39cf8b3eb2 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -1,9 +1,9 @@ +use crate::rpc::methods::*; use crate::rpc::{ codec::base::OutboundCodec, - protocol::{Encoding, Protocol, ProtocolId, RPCError, Version, ERROR_TYPE_MAX, ERROR_TYPE_MIN}, + protocol::{Encoding, ProtocolId, RPCError, SupportedProtocol, ERROR_TYPE_MAX, ERROR_TYPE_MIN}, }; use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse}; -use crate::{rpc::methods::*, EnrSyncCommitteeBitfield}; use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; use snap::write::FrameEncoder; @@ -76,27 +76,14 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. { - match self.protocol.version { - Version::V1 => MetaData::::V1(MetaDataV1 { - seq_number: *res.seq_number(), - attnets: res.attnets().clone(), - }) - .as_ssz_bytes(), - Version::V2 => { - // `res` is of type MetaDataV2, return the ssz bytes - if res.syncnets().is_ok() { - res.as_ssz_bytes() - } else { - // `res` is of type MetaDataV1, create a MetaDataV2 by adding a default syncnets field - // Note: This code path is redundant as `res` would be always of type MetaDataV2 - MetaData::::V2(MetaDataV2 { - seq_number: *res.seq_number(), - attnets: res.attnets().clone(), - syncnets: EnrSyncCommitteeBitfield::::default(), - }) - .as_ssz_bytes() - } - } + match self.protocol.versioned_protocol { + SupportedProtocol::MetaDataV1 => res.metadata_v1().as_ssz_bytes(), + // We always send V2 metadata responses from the behaviour + // No change required. 
+ SupportedProtocol::MetaDataV2 => res.metadata_v2().as_ssz_bytes(), + _ => unreachable!( + "We only send metadata responses on negotiating metadata requests" + ), } } }, @@ -139,8 +126,11 @@ impl Decoder for SSZSnappyInboundCodec { type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - if self.protocol.message_name == Protocol::MetaData { - return Ok(Some(InboundRequest::MetaData(PhantomData))); + if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV1 { + return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1()))); + } + if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 { + return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))); } let length = match handle_length(&mut self.inner, &mut self.len, src)? { Some(len) => len, @@ -152,8 +142,8 @@ impl Decoder for SSZSnappyInboundCodec { let ssz_limits = self.protocol.rpc_request_limits(); if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { return Err(RPCError::InvalidData(format!( - "RPC request length is out of bounds, length {}", - length + "RPC request length for protocol {:?} is out of bounds, length {}", + self.protocol.versioned_protocol, length ))); } // Calculate worst case compression length for given uncompressed length @@ -170,11 +160,7 @@ impl Decoder for SSZSnappyInboundCodec { let n = reader.get_ref().get_ref().position(); self.len = None; let _read_bytes = src.split_to(n as usize); - - match self.protocol.version { - Version::V1 => handle_v1_request(self.protocol.message_name, &decoded_buffer), - Version::V2 => handle_v2_request(self.protocol.message_name, &decoded_buffer), - } + handle_rpc_request(self.protocol.versioned_protocol, &decoded_buffer) } Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), } @@ -228,11 +214,16 @@ impl Encoder> for SSZSnappyOutboundCodec< let bytes = match item { OutboundRequest::Status(req) => req.as_ssz_bytes(), OutboundRequest::Goodbye(req) 
=> req.as_ssz_bytes(), - OutboundRequest::BlocksByRange(req) => req.as_ssz_bytes(), - OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), + OutboundRequest::BlocksByRange(r) => match r { + OldBlocksByRangeRequest::V1(req) => req.as_ssz_bytes(), + OldBlocksByRangeRequest::V2(req) => req.as_ssz_bytes(), + }, + OutboundRequest::BlocksByRoot(r) => match r { + BlocksByRootRequest::V1(req) => req.block_roots.as_ssz_bytes(), + BlocksByRootRequest::V2(req) => req.block_roots.as_ssz_bytes(), + }, OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode - OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(), }; // SSZ encoded bytes should be within `max_packet_size` if bytes.len() > self.max_packet_size { @@ -311,15 +302,10 @@ impl Decoder for SSZSnappyOutboundCodec { let n = reader.get_ref().get_ref().position(); self.len = None; let _read_bytes = src.split_to(n as usize); - - match self.protocol.version { - Version::V1 => handle_v1_response(self.protocol.message_name, &decoded_buffer), - Version::V2 => handle_v2_response( - self.protocol.message_name, - &decoded_buffer, - &mut self.fork_name, - ), - } + // Safe to `take` from `self.fork_name` as we have all the bytes we need to + // decode an ssz object at this point. + let fork_name = self.fork_name.take(); + handle_rpc_response(self.protocol.versioned_protocol, &decoded_buffer, fork_name) } Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), } @@ -456,181 +442,150 @@ fn handle_length( } } -/// Decodes a `Version::V1` `InboundRequest` from the byte stream. +/// Decodes an `InboundRequest` from the byte stream. /// `decoded_buffer` should be an ssz-encoded bytestream with // length = length-prefix received in the beginning of the stream. 
-fn handle_v1_request( - protocol: Protocol, +fn handle_rpc_request( + versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], ) -> Result>, RPCError> { - match protocol { - Protocol::Status => Ok(Some(InboundRequest::Status(StatusMessage::from_ssz_bytes( - decoded_buffer, - )?))), - Protocol::Goodbye => Ok(Some(InboundRequest::Goodbye( + match versioned_protocol { + SupportedProtocol::StatusV1 => Ok(Some(InboundRequest::Status( + StatusMessage::from_ssz_bytes(decoded_buffer)?, + ))), + SupportedProtocol::GoodbyeV1 => Ok(Some(InboundRequest::Goodbye( GoodbyeReason::from_ssz_bytes(decoded_buffer)?, ))), - Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( - OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, + SupportedProtocol::BlocksByRangeV2 => Ok(Some(InboundRequest::BlocksByRange( + OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2::from_ssz_bytes(decoded_buffer)?), ))), - Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { - block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, - }))), - Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping { + SupportedProtocol::BlocksByRangeV1 => Ok(Some(InboundRequest::BlocksByRange( + OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1::from_ssz_bytes(decoded_buffer)?), + ))), + SupportedProtocol::BlocksByRootV2 => Ok(Some(InboundRequest::BlocksByRoot( + BlocksByRootRequest::V2(BlocksByRootRequestV2 { + block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, + }), + ))), + SupportedProtocol::BlocksByRootV1 => Ok(Some(InboundRequest::BlocksByRoot( + BlocksByRootRequest::V1(BlocksByRootRequestV1 { + block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, + }), + ))), + SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - Protocol::LightClientBootstrap => Ok(Some(InboundRequest::LightClientBootstrap( - LightClientBootstrapRequest { + SupportedProtocol::LightClientBootstrapV1 => Ok(Some( 
+ InboundRequest::LightClientBootstrap(LightClientBootstrapRequest { root: Hash256::from_ssz_bytes(decoded_buffer)?, - }, - ))), + }), + )), // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. - Protocol::MetaData => { + SupportedProtocol::MetaDataV2 => { if !decoded_buffer.is_empty() { Err(RPCError::InternalError( "Metadata requests shouldn't reach decoder", )) } else { - Ok(Some(InboundRequest::MetaData(PhantomData))) + Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))) } } - } -} - -/// Decodes a `Version::V2` `InboundRequest` from the byte stream. -/// `decoded_buffer` should be an ssz-encoded bytestream with -// length = length-prefix received in the beginning of the stream. -fn handle_v2_request( - protocol: Protocol, - decoded_buffer: &[u8], -) -> Result>, RPCError> { - match protocol { - Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( - OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, - ))), - Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { - block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, - }))), - // MetaData requests return early from InboundUpgrade and do not reach the decoder. - // Handle this case just for completeness. - Protocol::MetaData => { + SupportedProtocol::MetaDataV1 => { if !decoded_buffer.is_empty() { Err(RPCError::InvalidData("Metadata request".to_string())) } else { - Ok(Some(InboundRequest::MetaData(PhantomData))) + Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1()))) } } - _ => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, - format!("{} does not support version 2", protocol), - )), } } -/// Decodes a `Version::V1` `RPCResponse` from the byte stream. +/// Decodes a `RPCResponse` from the byte stream. /// `decoded_buffer` should be an ssz-encoded bytestream with -// length = length-prefix received in the beginning of the stream. 
-fn handle_v1_response( - protocol: Protocol, - decoded_buffer: &[u8], -) -> Result>, RPCError> { - match protocol { - Protocol::Status => Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes( - decoded_buffer, - )?))), - // This case should be unreachable as `Goodbye` has no response. - Protocol::Goodbye => Err(RPCError::InvalidData( - "Goodbye RPC message has no valid response".to_string(), - )), - Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Arc::new( - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), - )))), - Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), - )))), - Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping { - data: u64::from_ssz_bytes(decoded_buffer)?, - }))), - Protocol::MetaData => Ok(Some(RPCResponse::MetaData(MetaData::V1( - MetaDataV1::from_ssz_bytes(decoded_buffer)?, - )))), - Protocol::LightClientBootstrap => Ok(Some(RPCResponse::LightClientBootstrap( - LightClientBootstrap::from_ssz_bytes(decoded_buffer)?, - ))), - } -} - -/// Decodes a `Version::V2` `RPCResponse` from the byte stream. -/// `decoded_buffer` should be an ssz-encoded bytestream with -// length = length-prefix received in the beginning of the stream. +/// length = length-prefix received in the beginning of the stream. /// /// For BlocksByRange/BlocksByRoot reponses, decodes the appropriate response /// according to the received `ForkName`. 
-fn handle_v2_response( - protocol: Protocol, +fn handle_rpc_response( + versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], - fork_name: &mut Option, + fork_name: Option, ) -> Result>, RPCError> { - // MetaData does not contain context_bytes - if let Protocol::MetaData = protocol { - Ok(Some(RPCResponse::MetaData(MetaData::V2( + match versioned_protocol { + SupportedProtocol::StatusV1 => Ok(Some(RPCResponse::Status( + StatusMessage::from_ssz_bytes(decoded_buffer)?, + ))), + // This case should be unreachable as `Goodbye` has no response. + SupportedProtocol::GoodbyeV1 => Err(RPCError::InvalidData( + "Goodbye RPC message has no valid response".to_string(), + )), + SupportedProtocol::BlocksByRangeV1 => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), + )))), + SupportedProtocol::BlocksByRootV1 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), + )))), + SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping { + data: u64::from_ssz_bytes(decoded_buffer)?, + }))), + SupportedProtocol::MetaDataV1 => Ok(Some(RPCResponse::MetaData(MetaData::V1( + MetaDataV1::from_ssz_bytes(decoded_buffer)?, + )))), + SupportedProtocol::LightClientBootstrapV1 => Ok(Some(RPCResponse::LightClientBootstrap( + LightClientBootstrap::from_ssz_bytes(decoded_buffer)?, + ))), + // MetaData V2 responses have no context bytes, so behave similarly to V1 responses + SupportedProtocol::MetaDataV2 => Ok(Some(RPCResponse::MetaData(MetaData::V2( MetaDataV2::from_ssz_bytes(decoded_buffer)?, - )))) - } else { - let fork_name = fork_name.take().ok_or_else(|| { - RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, - format!("No context bytes provided for {} response", protocol), - ) - })?; - match protocol { - Protocol::BlocksByRange => match fork_name { - ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Arc::new( - 
SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( - decoded_buffer, - )?), - )))), + )))), + SupportedProtocol::BlocksByRangeV2 => match fork_name { + Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?), + )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Arc::new( - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), - )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Arc::new( - SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( - decoded_buffer, - )?), - )))), - ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Arc::new( - SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( - decoded_buffer, - )?), - )))), - }, - Protocol::BlocksByRoot => match fork_name { - ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( - SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( - decoded_buffer, - )?), - )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), - )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( - SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( - decoded_buffer, - )?), - )))), - ForkName::Capella => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( - SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( - decoded_buffer, - )?), - )))), - }, - _ => Err(RPCError::ErrorResponse( + Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), + )))), + Some(ForkName::Merge) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes(decoded_buffer)?), + )))), + Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + 
SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( + decoded_buffer, + )?), + )))), + None => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, - "Invalid v2 request".to_string(), + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), )), - } + }, + SupportedProtocol::BlocksByRootV2 => match fork_name { + Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?), + )))), + Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), + )))), + Some(ForkName::Merge) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes(decoded_buffer)?), + )))), + Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( + decoded_buffer, + )?), + )))), + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, } } @@ -742,18 +697,20 @@ mod tests { } } - fn bbrange_request() -> OldBlocksByRangeRequest { - OldBlocksByRangeRequest { - start_slot: 0, - count: 10, - step: 1, - } + fn bbrange_request_v1() -> OldBlocksByRangeRequest { + OldBlocksByRangeRequest::new_v1(0, 10, 1) } - fn bbroot_request() -> BlocksByRootRequest { - BlocksByRootRequest { - block_roots: VariableList::from(vec![Hash256::zero()]), - } + fn bbrange_request_v2() -> OldBlocksByRangeRequest { + OldBlocksByRangeRequest::new(0, 10, 1) + } + + fn bbroot_request_v1() -> BlocksByRootRequest { + BlocksByRootRequest::new_v1(vec![Hash256::zero()].into()) + } + + fn bbroot_request_v2() -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![Hash256::zero()].into()) } fn ping_message() -> Ping { @@ -777,12 +734,11 @@ mod 
tests { /// Encodes the given protocol response as bytes. fn encode_response( - protocol: Protocol, - version: Version, + protocol: SupportedProtocol, message: RPCCodedResponse, fork_name: ForkName, ) -> Result { - let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy); + let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context); @@ -824,12 +780,11 @@ mod tests { /// Attempts to decode the given protocol bytes as an rpc response fn decode_response( - protocol: Protocol, - version: Version, + protocol: SupportedProtocol, message: &mut BytesMut, fork_name: ForkName, ) -> Result>, RPCError> { - let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy); + let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context); let mut snappy_outbound_codec = @@ -840,63 +795,55 @@ mod tests { /// Encodes the provided protocol message as bytes and tries to decode the encoding bytes. fn encode_then_decode_response( - protocol: Protocol, - version: Version, + protocol: SupportedProtocol, message: RPCCodedResponse, fork_name: ForkName, ) -> Result>, RPCError> { - let mut encoded = encode_response(protocol, version.clone(), message, fork_name)?; - decode_response(protocol, version, &mut encoded, fork_name) + let mut encoded = encode_response(protocol, message, fork_name)?; + decode_response(protocol, &mut encoded, fork_name) } /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
fn encode_then_decode_request(req: OutboundRequest, fork_name: ForkName) { let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context); - for protocol in req.supported_protocols() { - // Encode a request we send - let mut buf = BytesMut::new(); - let mut outbound_codec = SSZSnappyOutboundCodec::::new( - protocol.clone(), - max_packet_size, - fork_context.clone(), - ); - outbound_codec.encode(req.clone(), &mut buf).unwrap(); + let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); + // Encode a request we send + let mut buf = BytesMut::new(); + let mut outbound_codec = SSZSnappyOutboundCodec::::new( + protocol.clone(), + max_packet_size, + fork_context.clone(), + ); + outbound_codec.encode(req.clone(), &mut buf).unwrap(); - let mut inbound_codec = SSZSnappyInboundCodec::::new( - protocol.clone(), - max_packet_size, - fork_context.clone(), - ); + let mut inbound_codec = + SSZSnappyInboundCodec::::new(protocol.clone(), max_packet_size, fork_context); - let decoded = inbound_codec.decode(&mut buf).unwrap().unwrap_or_else(|| { - panic!( - "Should correctly decode the request {} over protocol {:?} and fork {}", - req, protocol, fork_name - ) - }); - match req.clone() { - OutboundRequest::Status(status) => { - assert_eq!(decoded, InboundRequest::Status(status)) - } - OutboundRequest::Goodbye(goodbye) => { - assert_eq!(decoded, InboundRequest::Goodbye(goodbye)) - } - OutboundRequest::BlocksByRange(bbrange) => { - assert_eq!(decoded, InboundRequest::BlocksByRange(bbrange)) - } - OutboundRequest::BlocksByRoot(bbroot) => { - assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) - } - OutboundRequest::Ping(ping) => { - assert_eq!(decoded, InboundRequest::Ping(ping)) - } - OutboundRequest::MetaData(metadata) => { - assert_eq!(decoded, InboundRequest::MetaData(metadata)) - } - OutboundRequest::LightClientBootstrap(bootstrap) => { - assert_eq!(decoded, InboundRequest::LightClientBootstrap(bootstrap)) - } + 
let decoded = inbound_codec.decode(&mut buf).unwrap().unwrap_or_else(|| { + panic!( + "Should correctly decode the request {} over protocol {:?} and fork {}", + req, protocol, fork_name + ) + }); + match req { + OutboundRequest::Status(status) => { + assert_eq!(decoded, InboundRequest::Status(status)) + } + OutboundRequest::Goodbye(goodbye) => { + assert_eq!(decoded, InboundRequest::Goodbye(goodbye)) + } + OutboundRequest::BlocksByRange(bbrange) => { + assert_eq!(decoded, InboundRequest::BlocksByRange(bbrange)) + } + OutboundRequest::BlocksByRoot(bbroot) => { + assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) + } + OutboundRequest::Ping(ping) => { + assert_eq!(decoded, InboundRequest::Ping(ping)) + } + OutboundRequest::MetaData(metadata) => { + assert_eq!(decoded, InboundRequest::MetaData(metadata)) } } } @@ -906,8 +853,7 @@ mod tests { fn test_encode_then_decode_v1() { assert_eq!( encode_then_decode_response( - Protocol::Status, - Version::V1, + SupportedProtocol::StatusV1, RPCCodedResponse::Success(RPCResponse::Status(status_message())), ForkName::Base, ), @@ -916,8 +862,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::Ping, - Version::V1, + SupportedProtocol::PingV1, RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), ForkName::Base, ), @@ -926,8 +871,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::BlocksByRange, - Version::V1, + SupportedProtocol::BlocksByRangeV1, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), @@ -939,8 +883,7 @@ mod tests { assert!( matches!( encode_then_decode_response( - Protocol::BlocksByRange, - Version::V1, + SupportedProtocol::BlocksByRangeV1, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ) @@ -952,8 +895,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::BlocksByRoot, - Version::V1, + SupportedProtocol::BlocksByRootV1, 
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ), @@ -965,8 +907,7 @@ mod tests { assert!( matches!( encode_then_decode_response( - Protocol::BlocksByRoot, - Version::V1, + SupportedProtocol::BlocksByRootV1, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) @@ -978,18 +919,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::MetaData, - Version::V1, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), - ForkName::Base, - ), - Ok(Some(RPCResponse::MetaData(metadata()))), - ); - - assert_eq!( - encode_then_decode_response( - Protocol::MetaData, - Version::V1, + SupportedProtocol::MetaDataV1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), ForkName::Base, ), @@ -999,8 +929,7 @@ mod tests { // A MetaDataV2 still encodes as a MetaDataV1 since version is Version::V1 assert_eq!( encode_then_decode_response( - Protocol::MetaData, - Version::V1, + SupportedProtocol::MetaDataV1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), ForkName::Base, ), @@ -1011,38 +940,9 @@ mod tests { // Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v2() { - assert!( - matches!( - encode_then_decode_response( - Protocol::Status, - Version::V2, - RPCCodedResponse::Success(RPCResponse::Status(status_message())), - ForkName::Base, - ) - .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), - ), - "status does not have V2 message" - ); - - assert!( - matches!( - encode_then_decode_response( - Protocol::Ping, - Version::V2, - RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), - ForkName::Base, - ) - .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), - ), - "ping does not have V2 message" - ); - assert_eq!( encode_then_decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, 
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), @@ -1056,8 +956,7 @@ mod tests { // the current_fork's rpc limit assert_eq!( encode_then_decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ), @@ -1068,8 +967,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ), @@ -1081,8 +979,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() ))), @@ -1100,8 +997,7 @@ mod tests { assert!( matches!( decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, &mut encoded, ForkName::Merge, ) @@ -1113,8 +1009,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::BlocksByRoot, - Version::V2, + SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ), @@ -1128,8 +1023,7 @@ mod tests { // the current_fork's rpc limit assert_eq!( encode_then_decode_response( - Protocol::BlocksByRoot, - Version::V2, + SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ), @@ -1140,8 +1034,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::BlocksByRoot, - Version::V2, + SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ), @@ -1150,8 +1043,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::BlocksByRoot, - Version::V2, + 
SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new( merge_block_small.clone() ))), @@ -1167,8 +1059,7 @@ mod tests { assert!( matches!( decode_response( - Protocol::BlocksByRoot, - Version::V2, + SupportedProtocol::BlocksByRootV2, &mut encoded, ForkName::Merge, ) @@ -1181,8 +1072,7 @@ mod tests { // A MetaDataV1 still encodes as a MetaDataV2 since version is Version::V2 assert_eq!( encode_then_decode_response( - Protocol::MetaData, - Version::V2, + SupportedProtocol::MetaDataV2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), ForkName::Base, ), @@ -1191,8 +1081,7 @@ mod tests { assert_eq!( encode_then_decode_response( - Protocol::MetaData, - Version::V2, + SupportedProtocol::MetaDataV2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), ForkName::Altair, ), @@ -1207,8 +1096,7 @@ mod tests { // Removing context bytes for v2 messages should error let mut encoded_bytes = encode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ) @@ -1218,8 +1106,7 @@ mod tests { assert!(matches!( decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, &mut encoded_bytes, ForkName::Base ) @@ -1228,8 +1115,7 @@ mod tests { )); let mut encoded_bytes = encode_response( - Protocol::BlocksByRoot, - Version::V2, + SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ) @@ -1239,8 +1125,7 @@ mod tests { assert!(matches!( decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, &mut encoded_bytes, ForkName::Base ) @@ -1250,8 +1135,7 @@ mod tests { // Trying to decode a base block with altair context bytes should give ssz decoding error let mut encoded_bytes = encode_response( - Protocol::BlocksByRange, - Version::V2, + 
SupportedProtocol::BlocksByRangeV2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ) @@ -1264,8 +1148,7 @@ mod tests { assert!(matches!( decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, &mut wrong_fork_bytes, ForkName::Altair ) @@ -1275,8 +1158,7 @@ mod tests { // Trying to decode an altair block with base context bytes should give ssz decoding error let mut encoded_bytes = encode_response( - Protocol::BlocksByRoot, - Version::V2, + SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) @@ -1288,8 +1170,7 @@ mod tests { assert!(matches!( decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, &mut wrong_fork_bytes, ForkName::Altair ) @@ -1302,8 +1183,7 @@ mod tests { encoded_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); encoded_bytes.extend_from_slice( &encode_response( - Protocol::MetaData, - Version::V2, + SupportedProtocol::MetaDataV2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), ForkName::Altair, ) @@ -1311,8 +1191,7 @@ mod tests { ); assert!(decode_response( - Protocol::MetaData, - Version::V2, + SupportedProtocol::MetaDataV2, &mut encoded_bytes, ForkName::Altair ) @@ -1320,8 +1199,7 @@ mod tests { // Sending context bytes which do not correspond to any fork should return an error let mut encoded_bytes = encode_response( - Protocol::BlocksByRoot, - Version::V2, + SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) @@ -1333,8 +1211,7 @@ mod tests { assert!(matches!( decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, &mut wrong_fork_bytes, ForkName::Altair ) @@ -1344,8 +1221,7 @@ mod tests { // Sending bytes less than context bytes length should wait for 
more bytes by returning `Ok(None)` let mut encoded_bytes = encode_response( - Protocol::BlocksByRoot, - Version::V2, + SupportedProtocol::BlocksByRootV2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) @@ -1355,8 +1231,7 @@ mod tests { assert_eq!( decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, &mut part, ForkName::Altair ), @@ -1370,9 +1245,12 @@ mod tests { OutboundRequest::Ping(ping_message()), OutboundRequest::Status(status_message()), OutboundRequest::Goodbye(GoodbyeReason::Fault), - OutboundRequest::BlocksByRange(bbrange_request()), - OutboundRequest::BlocksByRoot(bbroot_request()), - OutboundRequest::MetaData(PhantomData::), + OutboundRequest::BlocksByRange(bbrange_request_v1()), + OutboundRequest::BlocksByRange(bbrange_request_v2()), + OutboundRequest::BlocksByRoot(bbroot_request_v1()), + OutboundRequest::BlocksByRoot(bbroot_request_v2()), + OutboundRequest::MetaData(MetadataRequest::new_v1()), + OutboundRequest::MetaData(MetadataRequest::new_v2()), ]; for req in requests.iter() { for fork_name in ForkName::list_all() { @@ -1432,7 +1310,7 @@ mod tests { // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. assert!(matches!( - decode_response(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), + decode_response(SupportedProtocol::StatusV1, &mut dst, ForkName::Base).unwrap_err(), RPCError::InvalidData(_) )); } @@ -1490,8 +1368,7 @@ mod tests { // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
assert!(matches!( decode_response( - Protocol::BlocksByRange, - Version::V2, + SupportedProtocol::BlocksByRangeV2, &mut dst, ForkName::Altair ) @@ -1534,7 +1411,7 @@ mod tests { dst.extend_from_slice(writer.get_ref()); assert!(matches!( - decode_response(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), + decode_response(SupportedProtocol::StatusV1, &mut dst, ForkName::Base).unwrap_err(), RPCError::InvalidData(_) )); } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index bea0929fb0..a0f3acaf76 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -58,18 +58,41 @@ impl FromStr for ProtocolQuota { } } -/// Configurations for the rate limiter applied to outbound requests (made by the node itself). +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Debug, Default)] +pub struct OutboundRateLimiterConfig(pub RateLimiterConfig); + +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Debug, Default)] +pub struct InboundRateLimiterConfig(pub RateLimiterConfig); + +impl FromStr for OutboundRateLimiterConfig { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + RateLimiterConfig::from_str(s).map(Self) + } +} + +impl FromStr for InboundRateLimiterConfig { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + RateLimiterConfig::from_str(s).map(Self) + } +} + +/// Configurations for the rate limiter. 
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] -pub struct OutboundRateLimiterConfig { +pub struct RateLimiterConfig { pub(super) ping_quota: Quota, pub(super) meta_data_quota: Quota, pub(super) status_quota: Quota, pub(super) goodbye_quota: Quota, pub(super) blocks_by_range_quota: Quota, pub(super) blocks_by_root_quota: Quota, + pub(super) light_client_bootstrap_quota: Quota, } -impl OutboundRateLimiterConfig { +impl RateLimiterConfig { pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(2, 10); pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); @@ -77,22 +100,24 @@ impl OutboundRateLimiterConfig { pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); + pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); } -impl Default for OutboundRateLimiterConfig { +impl Default for RateLimiterConfig { fn default() -> Self { - OutboundRateLimiterConfig { + RateLimiterConfig { ping_quota: Self::DEFAULT_PING_QUOTA, meta_data_quota: Self::DEFAULT_META_DATA_QUOTA, status_quota: Self::DEFAULT_STATUS_QUOTA, goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, } } } -impl Debug for OutboundRateLimiterConfig { +impl Debug for RateLimiterConfig { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { macro_rules! 
fmt_q { ($quota:expr) => { @@ -104,7 +129,7 @@ impl Debug for OutboundRateLimiterConfig { }; } - f.debug_struct("OutboundRateLimiterConfig") + f.debug_struct("RateLimiterConfig") .field("ping", fmt_q!(&self.ping_quota)) .field("metadata", fmt_q!(&self.meta_data_quota)) .field("status", fmt_q!(&self.status_quota)) @@ -119,7 +144,7 @@ impl Debug for OutboundRateLimiterConfig { /// the default values. Protocol specified more than once use only the first given Quota. /// /// The expected format is a ';' separated list of [`ProtocolQuota`]. -impl FromStr for OutboundRateLimiterConfig { +impl FromStr for RateLimiterConfig { type Err = &'static str; fn from_str(s: &str) -> Result { @@ -129,6 +154,8 @@ impl FromStr for OutboundRateLimiterConfig { let mut goodbye_quota = None; let mut blocks_by_range_quota = None; let mut blocks_by_root_quota = None; + let mut light_client_bootstrap_quota = None; + for proto_def in s.split(';') { let ProtocolQuota { protocol, quota } = proto_def.parse()?; let quota = Some(quota); @@ -139,10 +166,12 @@ impl FromStr for OutboundRateLimiterConfig { Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), - Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. 
Quota should not be set."), + Protocol::LightClientBootstrap => { + light_client_bootstrap_quota = light_client_bootstrap_quota.or(quota) + } } } - Ok(OutboundRateLimiterConfig { + Ok(RateLimiterConfig { ping_quota: ping_quota.unwrap_or(Self::DEFAULT_PING_QUOTA), meta_data_quota: meta_data_quota.unwrap_or(Self::DEFAULT_META_DATA_QUOTA), status_quota: status_quota.unwrap_or(Self::DEFAULT_STATUS_QUOTA), @@ -151,6 +180,8 @@ impl FromStr for OutboundRateLimiterConfig { .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), blocks_by_root_quota: blocks_by_root_quota .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + light_client_bootstrap_quota: light_client_bootstrap_quota + .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), }) } } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index a1743c15fb..8199bee2a7 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -245,7 +245,7 @@ where while let Some((id, req)) = self.dial_queue.pop() { self.events_out.push(Err(HandlerErr::Outbound { error: RPCError::Disconnected, - proto: req.protocol(), + proto: req.versioned_protocol().protocol(), id, })); } @@ -269,7 +269,7 @@ where } _ => self.events_out.push(Err(HandlerErr::Outbound { error: RPCError::Disconnected, - proto: req.protocol(), + proto: req.versioned_protocol().protocol(), id, })), } @@ -334,7 +334,7 @@ where ) { self.dial_negotiated -= 1; let (id, request) = request_info; - let proto = request.protocol(); + let proto = request.versioned_protocol().protocol(); // accept outbound connections only if the handler is not deactivated if matches!(self.state, HandlerState::Deactivated) { @@ -414,7 +414,7 @@ where 128, ) as usize), delay_key: Some(delay_key), - protocol: req.protocol(), + protocol: req.versioned_protocol().protocol(), request_start_time: Instant::now(), remaining_chunks: expected_responses, }, @@ -422,7 +422,7 @@ where } else { 
self.events_out.push(Err(HandlerErr::Inbound { id: self.current_inbound_substream_id, - proto: req.protocol(), + proto: req.versioned_protocol().protocol(), error: RPCError::HandlerRejected, })); return self.shutdown(None); @@ -498,7 +498,7 @@ where }; self.events_out.push(Err(HandlerErr::Outbound { error, - proto: req.protocol(), + proto: req.versioned_protocol().protocol(), id, })); } @@ -895,7 +895,7 @@ where // else we return an error, stream should not have closed early. let outbound_err = HandlerErr::Outbound { id: request_id, - proto: request.protocol(), + proto: request.versioned_protocol().protocol(), error: RPCError::IncompleteStream, }; return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err))); diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 5da595c3db..af0ba2510b 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -3,11 +3,13 @@ use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use regex::bytes::Regex; use serde::Serialize; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::{ typenum::{U1024, U256}, VariableList, }; +use std::marker::PhantomData; use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; @@ -85,6 +87,30 @@ pub struct Ping { pub data: u64, } +/// The METADATA request structure. +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Clone, Debug, PartialEq, Serialize),) +)] +#[derive(Clone, Debug, PartialEq)] +pub struct MetadataRequest { + _phantom_data: PhantomData, +} + +impl MetadataRequest { + pub fn new_v1() -> Self { + Self::V1(MetadataRequestV1 { + _phantom_data: PhantomData, + }) + } + + pub fn new_v2() -> Self { + Self::V2(MetadataRequestV2 { + _phantom_data: PhantomData, + }) + } +} + /// The METADATA response structure. 
#[superstruct( variants(V1, V2), @@ -93,9 +119,8 @@ pub struct Ping { serde(bound = "T: EthSpec", deny_unknown_fields), ) )] -#[derive(Clone, Debug, PartialEq, Serialize, Encode)] +#[derive(Clone, Debug, PartialEq, Serialize)] #[serde(bound = "T: EthSpec")] -#[ssz(enum_behaviour = "transparent")] pub struct MetaData { /// A sequential counter indicating when data gets modified. pub seq_number: u64, @@ -106,6 +131,38 @@ pub struct MetaData { pub syncnets: EnrSyncCommitteeBitfield, } +impl MetaData { + /// Returns a V1 MetaData response from self. + pub fn metadata_v1(&self) -> Self { + match self { + md @ MetaData::V1(_) => md.clone(), + MetaData::V2(metadata) => MetaData::V1(MetaDataV1 { + seq_number: metadata.seq_number, + attnets: metadata.attnets.clone(), + }), + } + } + + /// Returns a V2 MetaData response from self by filling unavailable fields with default. + pub fn metadata_v2(&self) -> Self { + match self { + MetaData::V1(metadata) => MetaData::V2(MetaDataV2 { + seq_number: metadata.seq_number, + attnets: metadata.attnets.clone(), + syncnets: Default::default(), + }), + md @ MetaData::V2(_) => md.clone(), + } + } + + pub fn as_ssz_bytes(&self) -> Vec { + match self { + MetaData::V1(md) => md.as_ssz_bytes(), + MetaData::V2(md) => md.as_ssz_bytes(), + } + } +} + /// The reason given for a `Goodbye` message. /// /// Note: any unknown `u64::into(n)` will resolve to `Goodbye::Unknown` for any unknown `n`, @@ -197,7 +254,11 @@ impl ssz::Decode for GoodbyeReason { } /// Request a number of beacon block roots from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq)) +)] +#[derive(Clone, Debug, PartialEq)] pub struct BlocksByRangeRequest { /// The starting slot to request blocks. 
pub start_slot: u64, @@ -206,8 +267,23 @@ pub struct BlocksByRangeRequest { pub count: u64, } +impl BlocksByRangeRequest { + /// The default request is V2 + pub fn new(start_slot: u64, count: u64) -> Self { + Self::V2(BlocksByRangeRequestV2 { start_slot, count }) + } + + pub fn new_v1(start_slot: u64, count: u64) -> Self { + Self::V1(BlocksByRangeRequestV1 { start_slot, count }) + } +} + /// Request a number of beacon block roots from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq)) +)] +#[derive(Clone, Debug, PartialEq)] pub struct OldBlocksByRangeRequest { /// The starting slot to request blocks. pub start_slot: u64, @@ -223,13 +299,43 @@ pub struct OldBlocksByRangeRequest { pub step: u64, } +impl OldBlocksByRangeRequest { + /// The default request is V2 + pub fn new(start_slot: u64, count: u64, step: u64) -> Self { + Self::V2(OldBlocksByRangeRequestV2 { + start_slot, + count, + step, + }) + } + + pub fn new_v1(start_slot: u64, count: u64, step: u64) -> Self { + Self::V1(OldBlocksByRangeRequestV1 { + start_slot, + count, + step, + }) + } +} + /// Request a number of beacon block bodies from a peer. +#[superstruct(variants(V1, V2), variant_attributes(derive(Clone, Debug, PartialEq)))] #[derive(Clone, Debug, PartialEq)] pub struct BlocksByRootRequest { /// The list of beacon block bodies being requested. 
pub block_roots: VariableList, } +impl BlocksByRootRequest { + pub fn new(block_roots: VariableList) -> Self { + Self::V2(BlocksByRootRequestV2 { block_roots }) + } + + pub fn new_v1(block_roots: VariableList) -> Self { + Self::V1(BlocksByRootRequestV1 { block_roots }) + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -438,7 +544,12 @@ impl std::fmt::Display for GoodbyeReason { impl std::fmt::Display for BlocksByRangeRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Start Slot: {}, Count: {}", self.start_slot, self.count) + write!( + f, + "Start Slot: {}, Count: {}", + self.start_slot(), + self.count() + ) } } @@ -447,7 +558,9 @@ impl std::fmt::Display for OldBlocksByRangeRequest { write!( f, "Start Slot: {}, Count: {}, Step: {}", - self.start_slot, self.count, self.step + self.start_slot(), + self.count(), + self.step() ) } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 31569b820b..ffdc193bbb 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -17,7 +17,6 @@ use slog::{crit, debug, o}; use std::marker::PhantomData; use std::sync::Arc; use std::task::{Context, Poll}; -use std::time::Duration; use types::{EthSpec, ForkContext}; pub(crate) use handler::HandlerErr; @@ -32,7 +31,7 @@ pub use methods::{ pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; -use self::config::OutboundRateLimiterConfig; +use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; use self::self_limiter::SelfRateLimiter; pub(crate) mod codec; @@ -112,7 +111,7 @@ type BehaviourAction = /// logic. pub struct RPC { /// Rate limiter - limiter: RateLimiter, + limiter: Option, /// Rate limiter for our own requests. self_limiter: Option>, /// Queue of events to be processed. 
@@ -127,32 +126,24 @@ impl RPC { pub fn new( fork_context: Arc, enable_light_client_server: bool, + inbound_rate_limiter_config: Option, outbound_rate_limiter_config: Option, log: slog::Logger, ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); - let limiter = RateLimiter::builder() - .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) - .n_every(Protocol::Ping, 2, Duration::from_secs(10)) - .n_every(Protocol::Status, 5, Duration::from_secs(15)) - .one_every(Protocol::Goodbye, Duration::from_secs(10)) - .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10)) - .n_every( - Protocol::BlocksByRange, - methods::MAX_REQUEST_BLOCKS, - Duration::from_secs(10), - ) - .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) - .build() - .expect("Configuration parameters are valid"); + let inbound_limiter = inbound_rate_limiter_config.map(|config| { + debug!(log, "Using inbound rate limiting params"; "config" => ?config); + RateLimiter::new_with_config(config.0) + .expect("Inbound limiter configuration parameters are valid") + }); let self_limiter = outbound_rate_limiter_config.map(|config| { SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid") }); RPC { - limiter, + limiter: inbound_limiter, self_limiter, events: Vec::new(), fork_context, @@ -242,50 +233,60 @@ where event: ::OutEvent, ) { if let Ok(RPCReceived::Request(ref id, ref req)) = event { - // check if the request is conformant to the quota - match self.limiter.allows(&peer_id, req) { - Ok(()) => { - // send the event to the user - self.events - .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })) - } - Err(RateLimitedErr::TooLarge) => { - // we set the batch sizes, so this is a coding/config err for most protocols - let protocol = req.protocol(); - if matches!(protocol, Protocol::BlocksByRange) { - debug!(self.log, "Blocks by range request will never be processed"; "request" => %req); - } else { - 
crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); + if let Some(limiter) = self.limiter.as_mut() { + // check if the request is conformant to the quota + match limiter.allows(&peer_id, req) { + Ok(()) => { + // send the event to the user + self.events + .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })) } - // send an error code to the peer. - // the handler upon receiving the error code will send it back to the behaviour - self.send_response( - peer_id, - (conn_id, *id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, - "Rate limited. Request too large".into(), - ), - ); - } - Err(RateLimitedErr::TooSoon(wait_time)) => { - debug!(self.log, "Request exceeds the rate limit"; + Err(RateLimitedErr::TooLarge) => { + // we set the batch sizes, so this is a coding/config err for most protocols + let protocol = req.versioned_protocol().protocol(); + if matches!(protocol, Protocol::BlocksByRange) { + debug!(self.log, "Blocks by range request will never be processed"; "request" => %req); + } else { + crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); + } + // send an error code to the peer. + // the handler upon receiving the error code will send it back to the behaviour + self.send_response( + peer_id, + (conn_id, *id), + RPCCodedResponse::Error( + RPCResponseErrorCode::RateLimited, + "Rate limited. Request too large".into(), + ), + ); + } + Err(RateLimitedErr::TooSoon(wait_time)) => { + debug!(self.log, "Request exceeds the rate limit"; "request" => %req, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); - // send an error code to the peer. - // the handler upon receiving the error code will send it back to the behaviour - self.send_response( - peer_id, - (conn_id, *id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, - format!("Wait {:?}", wait_time).into(), - ), - ); + // send an error code to the peer. 
+ // the handler upon receiving the error code will send it back to the behaviour + self.send_response( + peer_id, + (conn_id, *id), + RPCCodedResponse::Error( + RPCResponseErrorCode::RateLimited, + format!("Wait {:?}", wait_time).into(), + ), + ); + } } + } else { + // No rate limiting, send the event to the user + self.events + .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })) } } else { self.events @@ -303,7 +304,9 @@ where _: &mut impl PollParameters, ) -> Poll> { // let the rate limiter prune. - let _ = self.limiter.poll_unpin(cx); + if let Some(limiter) = self.limiter.as_mut() { + let _ = limiter.poll_unpin(cx); + } if let Some(self_limiter) = self.self_limiter.as_mut() { if let Poll::Ready(event) = self_limiter.poll_ready(cx) { @@ -332,7 +335,7 @@ where serializer.emit_arguments("peer_id", &format_args!("{}", self.peer_id))?; let (msg_kind, protocol) = match &self.event { Ok(received) => match received { - RPCReceived::Request(_, req) => ("request", req.protocol()), + RPCReceived::Request(_, req) => ("request", req.versioned_protocol().protocol()), RPCReceived::Response(_, res) => ("response", res.protocol()), RPCReceived::EndOfStream(_, end) => ( "end_of_stream", diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 774303800e..d12f366861 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -1,11 +1,8 @@ -use std::marker::PhantomData; - use super::methods::*; -use super::protocol::Protocol; use super::protocol::ProtocolId; +use super::protocol::SupportedProtocol; use super::RPCError; use crate::rpc::protocol::Encoding; -use crate::rpc::protocol::Version; use crate::rpc::{ codec::{base::BaseOutboundCodec, ssz_snappy::SSZSnappyOutboundCodec, OutboundCodec}, methods::ResponseTermination, @@ -38,9 +35,8 @@ pub enum OutboundRequest { Goodbye(GoodbyeReason), 
BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), - LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), - MetaData(PhantomData), + MetaData(MetadataRequest), } impl UpgradeInfo for OutboundRequestContainer { @@ -59,36 +55,29 @@ impl OutboundRequest { match self { // add more protocols when versions/encodings are supported OutboundRequest::Status(_) => vec![ProtocolId::new( - Protocol::Status, - Version::V1, + SupportedProtocol::StatusV1, Encoding::SSZSnappy, )], OutboundRequest::Goodbye(_) => vec![ProtocolId::new( - Protocol::Goodbye, - Version::V1, + SupportedProtocol::GoodbyeV1, Encoding::SSZSnappy, )], OutboundRequest::BlocksByRange(_) => vec![ - ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy), ], OutboundRequest::BlocksByRoot(_) => vec![ - ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), ], OutboundRequest::Ping(_) => vec![ProtocolId::new( - Protocol::Ping, - Version::V1, + SupportedProtocol::PingV1, Encoding::SSZSnappy, )], OutboundRequest::MetaData(_) => vec![ - ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy), ], - // Note: This match arm is technically unreachable as we only respond to light client requests - // that we generate from the beacon state. 
- // We do not make light client rpc requests from the beacon node - OutboundRequest::LightClientBootstrap(_) => vec![], } } /* These functions are used in the handler for stream management */ @@ -98,24 +87,31 @@ impl OutboundRequest { match self { OutboundRequest::Status(_) => 1, OutboundRequest::Goodbye(_) => 0, - OutboundRequest::BlocksByRange(req) => req.count, - OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, + OutboundRequest::BlocksByRange(req) => *req.count(), + OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, - OutboundRequest::LightClientBootstrap(_) => 1, } } - /// Gives the corresponding `Protocol` to this request. - pub fn protocol(&self) -> Protocol { + /// Gives the corresponding `SupportedProtocol` to this request. + pub fn versioned_protocol(&self) -> SupportedProtocol { match self { - OutboundRequest::Status(_) => Protocol::Status, - OutboundRequest::Goodbye(_) => Protocol::Goodbye, - OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, - OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, - OutboundRequest::Ping(_) => Protocol::Ping, - OutboundRequest::MetaData(_) => Protocol::MetaData, - OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, + OutboundRequest::Status(_) => SupportedProtocol::StatusV1, + OutboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1, + OutboundRequest::BlocksByRange(req) => match req { + OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1, + OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2, + }, + OutboundRequest::BlocksByRoot(req) => match req { + BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, + BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, + }, + OutboundRequest::Ping(_) => SupportedProtocol::PingV1, + OutboundRequest::MetaData(req) => match req { + MetadataRequest::V1(_) => 
SupportedProtocol::MetaDataV1, + MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, + }, } } @@ -127,7 +123,6 @@ impl OutboundRequest { // variants that have `multiple_responses()` can have values. OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, - OutboundRequest::LightClientBootstrap(_) => unreachable!(), OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -185,9 +180,6 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), - OutboundRequest::LightClientBootstrap(bootstrap) => { - write!(f, "Lightclient Bootstrap: {}", bootstrap.root) - } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index a8423e47b0..ea39c1423a 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -179,21 +179,74 @@ pub enum Protocol { LightClientBootstrap, } -/// RPC Versions -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum Version { - /// Version 1 of RPC - V1, - /// Version 2 of RPC - V2, -} - /// RPC Encondings supported. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Encoding { SSZSnappy, } +/// All valid protocol name and version combinations. 
+#[derive(Debug, Clone, Copy, PartialEq)] +pub enum SupportedProtocol { + StatusV1, + GoodbyeV1, + BlocksByRangeV1, + BlocksByRangeV2, + BlocksByRootV1, + BlocksByRootV2, + PingV1, + MetaDataV1, + MetaDataV2, + LightClientBootstrapV1, +} + +impl SupportedProtocol { + pub fn version_string(&self) -> &'static str { + match self { + SupportedProtocol::StatusV1 => "1", + SupportedProtocol::GoodbyeV1 => "1", + SupportedProtocol::BlocksByRangeV1 => "1", + SupportedProtocol::BlocksByRangeV2 => "2", + SupportedProtocol::BlocksByRootV1 => "1", + SupportedProtocol::BlocksByRootV2 => "2", + SupportedProtocol::PingV1 => "1", + SupportedProtocol::MetaDataV1 => "1", + SupportedProtocol::MetaDataV2 => "2", + SupportedProtocol::LightClientBootstrapV1 => "1", + } + } + + pub fn protocol(&self) -> Protocol { + match self { + SupportedProtocol::StatusV1 => Protocol::Status, + SupportedProtocol::GoodbyeV1 => Protocol::Goodbye, + SupportedProtocol::BlocksByRangeV1 => Protocol::BlocksByRange, + SupportedProtocol::BlocksByRangeV2 => Protocol::BlocksByRange, + SupportedProtocol::BlocksByRootV1 => Protocol::BlocksByRoot, + SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot, + SupportedProtocol::PingV1 => Protocol::Ping, + SupportedProtocol::MetaDataV1 => Protocol::MetaData, + SupportedProtocol::MetaDataV2 => Protocol::MetaData, + SupportedProtocol::LightClientBootstrapV1 => Protocol::LightClientBootstrap, + } + } + + fn currently_supported() -> Vec { + vec![ + ProtocolId::new(Self::StatusV1, Encoding::SSZSnappy), + ProtocolId::new(Self::GoodbyeV1, Encoding::SSZSnappy), + // V2 variants have higher preference then V1 + ProtocolId::new(Self::BlocksByRangeV2, Encoding::SSZSnappy), + ProtocolId::new(Self::BlocksByRangeV1, Encoding::SSZSnappy), + ProtocolId::new(Self::BlocksByRootV2, Encoding::SSZSnappy), + ProtocolId::new(Self::BlocksByRootV1, Encoding::SSZSnappy), + ProtocolId::new(Self::PingV1, Encoding::SSZSnappy), + ProtocolId::new(Self::MetaDataV2, Encoding::SSZSnappy), + 
ProtocolId::new(Self::MetaDataV1, Encoding::SSZSnappy), + ] + } +} + impl std::fmt::Display for Encoding { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { @@ -203,16 +256,6 @@ impl std::fmt::Display for Encoding { } } -impl std::fmt::Display for Version { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let repr = match self { - Version::V1 => "1", - Version::V2 => "2", - }; - f.write_str(repr) - } -} - #[derive(Debug, Clone)] pub struct RPCProtocol { pub fork_context: Arc, @@ -227,22 +270,10 @@ impl UpgradeInfo for RPCProtocol { /// The list of supported RPC protocols for Lighthouse. fn protocol_info(&self) -> Self::InfoIter { - let mut supported_protocols = vec![ - ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), - ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), - // V2 variants have higher preference then V1 - ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), - ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), - ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), - ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), - ]; + let mut supported_protocols = SupportedProtocol::currently_supported(); if self.enable_light_client_server { supported_protocols.push(ProtocolId::new( - Protocol::LightClientBootstrap, - Version::V1, + SupportedProtocol::LightClientBootstrapV1, Encoding::SSZSnappy, )); } @@ -272,11 +303,8 @@ impl RpcLimits { /// Tracks the types in a protocol id. #[derive(Clone, Debug)] pub struct ProtocolId { - /// The RPC message type/name. - pub message_name: Protocol, - - /// The version of the RPC. 
- pub version: Version, + /// The protocol name and version + pub versioned_protocol: SupportedProtocol, /// The encoding of the RPC. pub encoding: Encoding, @@ -288,7 +316,7 @@ pub struct ProtocolId { impl ProtocolId { /// Returns min and max size for messages of given protocol id requests. pub fn rpc_request_limits(&self) -> RpcLimits { - match self.message_name { + match self.versioned_protocol.protocol() { Protocol::Status => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -297,9 +325,10 @@ impl ProtocolId { ::ssz_fixed_len(), ::ssz_fixed_len(), ), + // V1 and V2 requests are the same Protocol::BlocksByRange => RpcLimits::new( - ::ssz_fixed_len(), - ::ssz_fixed_len(), + ::ssz_fixed_len(), + ::ssz_fixed_len(), ), Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) @@ -318,7 +347,7 @@ impl ProtocolId { /// Returns min and max size for messages of given protocol id responses. pub fn rpc_response_limits(&self, fork_context: &ForkContext) -> RpcLimits { - match self.message_name { + match self.versioned_protocol.protocol() { Protocol::Status => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -344,30 +373,34 @@ impl ProtocolId { /// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the /// beginning of the stream, else returns `false`. 
pub fn has_context_bytes(&self) -> bool { - match self.message_name { - Protocol::BlocksByRange | Protocol::BlocksByRoot => match self.version { - Version::V2 => true, - Version::V1 => false, - }, - Protocol::LightClientBootstrap => match self.version { - Version::V2 | Version::V1 => true, - }, - Protocol::Goodbye | Protocol::Ping | Protocol::Status | Protocol::MetaData => false, + match self.versioned_protocol { + SupportedProtocol::BlocksByRangeV2 + | SupportedProtocol::BlocksByRootV2 + | SupportedProtocol::LightClientBootstrapV1 => true, + SupportedProtocol::StatusV1 + | SupportedProtocol::BlocksByRootV1 + | SupportedProtocol::BlocksByRangeV1 + | SupportedProtocol::PingV1 + | SupportedProtocol::MetaDataV1 + | SupportedProtocol::MetaDataV2 + | SupportedProtocol::GoodbyeV1 => false, } } } /// An RPC protocol ID. impl ProtocolId { - pub fn new(message_name: Protocol, version: Version, encoding: Encoding) -> Self { + pub fn new(versioned_protocol: SupportedProtocol, encoding: Encoding) -> Self { let protocol_id = format!( "{}/{}/{}/{}", - PROTOCOL_PREFIX, message_name, version, encoding + PROTOCOL_PREFIX, + versioned_protocol.protocol(), + versioned_protocol.version_string(), + encoding ); ProtocolId { - message_name, - version, + versioned_protocol, encoding, protocol_id, } @@ -400,7 +433,7 @@ where fn upgrade_inbound(self, socket: TSocket, protocol: ProtocolId) -> Self::Future { async move { - let protocol_name = protocol.message_name; + let versioned_protocol = protocol.versioned_protocol; // convert the socket to tokio compatible socket let socket = socket.compat(); let codec = match protocol.encoding { @@ -419,8 +452,13 @@ where let socket = Framed::new(Box::pin(timed_socket), codec); // MetaData requests should be empty, return the stream - match protocol_name { - Protocol::MetaData => Ok((InboundRequest::MetaData(PhantomData), socket)), + match versioned_protocol { + SupportedProtocol::MetaDataV1 => { + Ok((InboundRequest::MetaData(MetadataRequest::new_v1()), 
socket)) + } + SupportedProtocol::MetaDataV2 => { + Ok((InboundRequest::MetaData(MetadataRequest::new_v2()), socket)) + } _ => { match tokio::time::timeout( Duration::from_secs(REQUEST_TIMEOUT), @@ -448,7 +486,7 @@ pub enum InboundRequest { BlocksByRoot(BlocksByRootRequest), LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), - MetaData(PhantomData), + MetaData(MetadataRequest), } /// Implements the encoding per supported protocol for `RPCRequest`. @@ -460,24 +498,33 @@ impl InboundRequest { match self { InboundRequest::Status(_) => 1, InboundRequest::Goodbye(_) => 0, - InboundRequest::BlocksByRange(req) => req.count, - InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, + InboundRequest::BlocksByRange(req) => *req.count(), + InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, InboundRequest::LightClientBootstrap(_) => 1, } } - /// Gives the corresponding `Protocol` to this request. - pub fn protocol(&self) -> Protocol { + /// Gives the corresponding `SupportedProtocol` to this request. 
+ pub fn versioned_protocol(&self) -> SupportedProtocol { match self { - InboundRequest::Status(_) => Protocol::Status, - InboundRequest::Goodbye(_) => Protocol::Goodbye, - InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, - InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, - InboundRequest::Ping(_) => Protocol::Ping, - InboundRequest::MetaData(_) => Protocol::MetaData, - InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, + InboundRequest::Status(_) => SupportedProtocol::StatusV1, + InboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1, + InboundRequest::BlocksByRange(req) => match req { + OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1, + OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2, + }, + InboundRequest::BlocksByRoot(req) => match req { + BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, + BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, + }, + InboundRequest::Ping(_) => SupportedProtocol::PingV1, + InboundRequest::MetaData(req) => match req { + MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, + MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, + }, + InboundRequest::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1, } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index a1f7b89a2f..e1634d711b 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -1,3 +1,4 @@ +use super::config::RateLimiterConfig; use crate::rpc::Protocol; use fnv::FnvHashMap; use libp2p::PeerId; @@ -141,29 +142,6 @@ impl RPCRateLimiterBuilder { self } - /// Allow one token every `time_period` to be used for this `protocol`. - /// This produces a hard limit. 
- pub fn one_every(self, protocol: Protocol, time_period: Duration) -> Self { - self.set_quota( - protocol, - Quota { - replenish_all_every: time_period, - max_tokens: 1, - }, - ) - } - - /// Allow `n` tokens to be use used every `time_period` for this `protocol`. - pub fn n_every(self, protocol: Protocol, n: u64, time_period: Duration) -> Self { - self.set_quota( - protocol, - Quota { - max_tokens: n, - replenish_all_every: time_period, - }, - ) - } - pub fn build(self) -> Result { // get our quotas let ping_quota = self.ping_quota.ok_or("Ping quota not specified")?; @@ -214,7 +192,7 @@ pub trait RateLimiterItem { impl RateLimiterItem for super::InboundRequest { fn protocol(&self) -> Protocol { - self.protocol() + self.versioned_protocol().protocol() } fn expected_responses(&self) -> u64 { @@ -224,7 +202,7 @@ impl RateLimiterItem for super::InboundRequest { impl RateLimiterItem for super::OutboundRequest { fn protocol(&self) -> Protocol { - self.protocol() + self.versioned_protocol().protocol() } fn expected_responses(&self) -> u64 { @@ -232,6 +210,29 @@ impl RateLimiterItem for super::OutboundRequest { } } impl RPCRateLimiter { + pub fn new_with_config(config: RateLimiterConfig) -> Result { + // Destructure to make sure every configuration value is used. + let RateLimiterConfig { + ping_quota, + meta_data_quota, + status_quota, + goodbye_quota, + blocks_by_range_quota, + blocks_by_root_quota, + light_client_bootstrap_quota, + } = config; + + Self::builder() + .set_quota(Protocol::Ping, ping_quota) + .set_quota(Protocol::MetaData, meta_data_quota) + .set_quota(Protocol::Status, status_quota) + .set_quota(Protocol::Goodbye, goodbye_quota) + .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) + .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) + .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) + .build() + } + /// Get a builder instance. 
pub fn builder() -> RPCRateLimiterBuilder { RPCRateLimiterBuilder::default() diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 451c6206f3..626917d6a7 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -52,28 +52,7 @@ impl SelfRateLimiter { /// Creates a new [`SelfRateLimiter`] based on configration values. pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result { debug!(log, "Using self rate limiting params"; "config" => ?config); - // Destructure to make sure every configuration value is used. - let OutboundRateLimiterConfig { - ping_quota, - meta_data_quota, - status_quota, - goodbye_quota, - blocks_by_range_quota, - blocks_by_root_quota, - } = config; - - let limiter = RateLimiter::builder() - .set_quota(Protocol::Ping, ping_quota) - .set_quota(Protocol::MetaData, meta_data_quota) - .set_quota(Protocol::Status, status_quota) - .set_quota(Protocol::Goodbye, goodbye_quota) - .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) - .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) - // Manually set the LightClientBootstrap quota, since we use the same rate limiter for - // inbound and outbound requests, and the LightClientBootstrap is an only inbound - // protocol. - .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10)) - .build()?; + let limiter = RateLimiter::new_with_config(config.0)?; Ok(SelfRateLimiter { delayed_requests: Default::default(), @@ -93,7 +72,7 @@ impl SelfRateLimiter { request_id: Id, req: OutboundRequest, ) -> Result, Error> { - let protocol = req.protocol(); + let protocol = req.versioned_protocol().protocol(); // First check that there are not already other requests waiting to be sent. 
if let Some(queued_requests) = self.delayed_requests.get_mut(&(peer_id, protocol)) { queued_requests.push_back(QueuedRequest { req, request_id }); @@ -132,7 +111,7 @@ impl SelfRateLimiter { event: RPCSend::Request(request_id, req), }), Err(e) => { - let protocol = req.protocol(); + let protocol = req.versioned_protocol(); match e { RateLimitedErr::TooLarge => { // this should never happen with default parameters. Let's just send the request. @@ -140,7 +119,7 @@ impl SelfRateLimiter { crit!( log, "Self rate limiting error for a batch that will never fit. Sending request anyway. Check configuration parameters."; - "protocol" => %req.protocol() + "protocol" => %req.versioned_protocol().protocol() ); Ok(BehaviourAction::NotifyHandler { peer_id, @@ -149,7 +128,7 @@ impl SelfRateLimiter { }) } RateLimitedErr::TooSoon(wait_time) => { - debug!(log, "Self rate limiting"; "protocol" => %protocol, "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id); + debug!(log, "Self rate limiting"; "protocol" => %protocol.protocol(), "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id); Err((QueuedRequest { req, request_id }, wait_time)) } } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index bd3df79769..5ab89fee51 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -7,7 +7,8 @@ use types::{EthSpec, SignedBeaconBlock}; use crate::rpc::{ methods::{ BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, - OldBlocksByRangeRequest, RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage, + OldBlocksByRangeRequest, OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2, + RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage, }, OutboundRequest, SubstreamId, }; @@ -43,14 +44,25 @@ impl std::convert::From for OutboundRequest { fn from(req: Request) -> OutboundRequest { match 
req { Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), - Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => { - OutboundRequest::BlocksByRange(OldBlocksByRangeRequest { - start_slot, - count, - step: 1, - }) + Request::BlocksByRange(r) => match r { + BlocksByRangeRequest::V1(req) => OutboundRequest::BlocksByRange( + OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { + start_slot: req.start_slot, + count: req.count, + step: 1, + }), + ), + BlocksByRangeRequest::V2(req) => OutboundRequest::BlocksByRange( + OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: req.start_slot, + count: req.count, + step: 1, + }), + ), + }, + Request::LightClientBootstrap(_) => { + unreachable!("Lighthouse never makes an outbound light client request") } - Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b), Request::Status(s) => OutboundRequest::Status(s), } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index f815e3bd36..129a4da25b 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -9,6 +9,7 @@ use crate::peer_manager::{ ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; +use crate::rpc::methods::MetadataRequest; use crate::rpc::*; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; @@ -37,7 +38,6 @@ use slog::{crit, debug, info, o, trace, warn}; use std::path::PathBuf; use std::pin::Pin; use std::{ - marker::PhantomData, sync::Arc, task::{Context, Poll}, }; @@ -266,6 +266,7 @@ impl Network { let eth2_rpc = RPC::new( ctx.fork_context.clone(), config.enable_light_client_server, + config.inbound_rate_limiter_config.clone(), config.outbound_rate_limiter_config.clone(), log.clone(), ); @@ -943,16 +944,25 @@ impl Network { /// Sends a 
METADATA request to a peer. fn send_meta_data_request(&mut self, peer_id: PeerId) { - let event = OutboundRequest::MetaData(PhantomData); + // We always prefer sending V2 requests + let event = OutboundRequest::MetaData(MetadataRequest::new_v2()); self.eth2_rpc_mut() .send_request(peer_id, RequestId::Internal, event); } /// Sends a METADATA response to a peer. - fn send_meta_data_response(&mut self, id: PeerRequestId, peer_id: PeerId) { - let event = RPCCodedResponse::Success(RPCResponse::MetaData( - self.network_globals.local_metadata.read().clone(), - )); + fn send_meta_data_response( + &mut self, + req: MetadataRequest, + id: PeerRequestId, + peer_id: PeerId, + ) { + let metadata = self.network_globals.local_metadata.read().clone(); + let metadata = match req { + MetadataRequest::V1(_) => metadata.metadata_v1(), + MetadataRequest::V2(_) => metadata, + }; + let event = RPCCodedResponse::Success(RPCResponse::MetaData(metadata)); self.eth2_rpc_mut().send_response(peer_id, id, event); } @@ -1195,9 +1205,9 @@ impl Network { self.pong(peer_request_id, peer_id); None } - InboundRequest::MetaData(_) => { + InboundRequest::MetaData(req) => { // send the requested meta-data - self.send_meta_data_response((handler_id, id), peer_id); + self.send_meta_data_response(req, (handler_id, id), peer_id); None } InboundRequest::Goodbye(reason) => { @@ -1224,13 +1234,9 @@ impl Network { Some(event) } InboundRequest::BlocksByRange(req) => { - let methods::OldBlocksByRangeRequest { - start_slot, - mut count, - step, - } = req; // Still disconnect the peer if the request is naughty. - if step == 0 { + let mut count = *req.count(); + if *req.step() == 0 { self.peer_manager_mut().handle_rpc_error( &peer_id, Protocol::BlocksByRange, @@ -1242,14 +1248,18 @@ impl Network { return None; } // return just one block in case the step parameter is used. 
https://github.com/ethereum/consensus-specs/pull/2856 - if step > 1 { + if *req.step() > 1 { count = 1; } - let event = self.build_request( - peer_request_id, - peer_id, - Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }), - ); + let request = match req { + methods::OldBlocksByRangeRequest::V1(req) => Request::BlocksByRange( + BlocksByRangeRequest::new_v1(req.start_slot, count), + ), + methods::OldBlocksByRangeRequest::V2(req) => Request::BlocksByRange( + BlocksByRangeRequest::new(req.start_slot, count), + ), + }; + let event = self.build_request(peer_request_id, peer_id, request); Some(event) } InboundRequest::BlocksByRoot(req) => { diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 625df65ee9..ac0dc57d7b 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -272,9 +272,11 @@ pub(crate) fn save_metadata_to_disk( log: &slog::Logger, ) { let _ = std::fs::create_dir_all(dir); - match File::create(dir.join(METADATA_FILENAME)) - .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) - { + let metadata_bytes = match metadata { + MetaData::V1(md) => md.as_ssz_bytes(), + MetaData::V2(md) => md.as_ssz_bytes(), + }; + match File::create(dir.join(METADATA_FILENAME)).and_then(|mut f| f.write_all(&metadata_bytes)) { Ok(_) => { debug!(log, "Metadata written to disk"); } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index d44f20c080..64714cbc0a 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -123,36 +123,6 @@ pub fn get_enr(node: &LibP2PService) -> Enr { node.local_enr() } -// Returns `n` libp2p peers in fully connected topology. 
-#[allow(dead_code)] -/* -pub async fn build_full_mesh( - rt: Weak, - log: slog::Logger, - n: usize, - fork_name: ForkName, -) -> Vec { - let mut nodes = Vec::with_capacity(n); - for _ in 0..n { - nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name).await); - } - let multiaddrs: Vec = nodes - .iter() - .map(|x| get_enr(x).multiaddr()[1].clone()) - .collect(); - - for (i, node) in nodes.iter_mut().enumerate().take(n) { - for (j, multiaddr) in multiaddrs.iter().enumerate().skip(i) { - if i != j { - match libp2p::Swarm::dial(&mut node.swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Connected"), - Err(_) => error!(log, "Failed to connect"), - }; - } - } - } - nodes -}*/ // Constructs a pair of nodes with separate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. #[allow(dead_code)] diff --git a/beacon_node/lighthouse_network/tests/gossipsub_tests.rs b/beacon_node/lighthouse_network/tests/gossipsub_tests.rs deleted file mode 100644 index c5b661cf70..0000000000 --- a/beacon_node/lighthouse_network/tests/gossipsub_tests.rs +++ /dev/null @@ -1,171 +0,0 @@ -/* These are temporarily disabled due to their non-deterministic behaviour and impending update to - * gossipsub 1.1. We leave these here as a template for future test upgrades - - -#![cfg(test)] -use crate::types::GossipEncoding; -use ::types::{BeaconBlock, EthSpec, MinimalEthSpec, Signature, SignedBeaconBlock}; -use lighthouse_network::*; -use slog::{debug, Level}; - -type E = MinimalEthSpec; - -mod common; - -/* Gossipsub tests */ -// Note: The aim of these tests is not to test the robustness of the gossip network -// but to check if the gossipsub implementation is behaving according to the specifications. - -// Test if gossipsub message are forwarded by nodes with a simple linear topology. -// -// Topology used in test -// -// node1 <-> node2 <-> node3 ..... 
<-> node(n-1) <-> node(n) - -#[tokio::test] -async fn test_gossipsub_forward() { - // set up the logging. The level and enabled or not - let log = common::build_log(Level::Info, false); - - let num_nodes = 20; - let mut nodes = common::build_linear(log.clone(), num_nodes); - let mut received_count = 0; - let spec = E::default_spec(); - let empty_block = BeaconBlock::empty(&spec); - let signed_block = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty_signature(), - }; - let pubsub_message = PubsubMessage::BeaconBlock(Box::new(signed_block)); - let publishing_topic: String = pubsub_message - .topics(GossipEncoding::default(), [0, 0, 0, 0]) - .first() - .unwrap() - .clone() - .into(); - let mut subscribed_count = 0; - let fut = async move { - for node in nodes.iter_mut() { - loop { - match node.next_event().await { - Libp2pEvent::Behaviour(b) => match b { - BehaviourEvent::PubsubMessage { - topics, - message, - source, - id, - } => { - assert_eq!(topics.len(), 1); - // Assert topic is the published topic - assert_eq!( - topics.first().unwrap(), - &TopicHash::from_raw(publishing_topic.clone()) - ); - // Assert message received is the correct one - assert_eq!(message, pubsub_message.clone()); - received_count += 1; - // Since `propagate_message` is false, need to propagate manually - node.swarm.propagate_message(&source, id); - // Test should succeed if all nodes except the publisher receive the message - if received_count == num_nodes - 1 { - debug!(log.clone(), "Received message at {} nodes", num_nodes - 1); - return; - } - } - BehaviourEvent::PeerSubscribed(_, topic) => { - // Publish on beacon block topic - if topic == TopicHash::from_raw(publishing_topic.clone()) { - subscribed_count += 1; - // Every node except the corner nodes are connected to 2 nodes. - if subscribed_count == (num_nodes * 2) - 2 { - node.swarm.publish(vec![pubsub_message.clone()]); - } - } - } - _ => break, - }, - _ => break, - } - } - } - }; - - tokio::select! 
{ - _ = fut => {} - _ = tokio::time::delay_for(tokio::time::Duration::from_millis(800)) => { - panic!("Future timed out"); - } - } -} - -// Test publishing of a message with a full mesh for the topic -// Not very useful but this is the bare minimum functionality. -#[tokio::test] -async fn test_gossipsub_full_mesh_publish() { - // set up the logging. The level and enabled or not - let log = common::build_log(Level::Debug, false); - - // Note: This test does not propagate gossipsub messages. - // Having `num_nodes` > `mesh_n_high` may give inconsistent results - // as nodes may get pruned out of the mesh before the gossipsub message - // is published to them. - let num_nodes = 12; - let mut nodes = common::build_full_mesh(log, num_nodes); - let mut publishing_node = nodes.pop().unwrap(); - let spec = E::default_spec(); - let empty_block = BeaconBlock::empty(&spec); - let signed_block = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty_signature(), - }; - let pubsub_message = PubsubMessage::BeaconBlock(Box::new(signed_block)); - let publishing_topic: String = pubsub_message - .topics(GossipEncoding::default(), [0, 0, 0, 0]) - .first() - .unwrap() - .clone() - .into(); - let mut subscribed_count = 0; - let mut received_count = 0; - let fut = async move { - for node in nodes.iter_mut() { - while let Libp2pEvent::Behaviour(BehaviourEvent::PubsubMessage { - topics, - message, - .. 
- }) = node.next_event().await - { - assert_eq!(topics.len(), 1); - // Assert topic is the published topic - assert_eq!( - topics.first().unwrap(), - &TopicHash::from_raw(publishing_topic.clone()) - ); - // Assert message received is the correct one - assert_eq!(message, pubsub_message.clone()); - received_count += 1; - if received_count == num_nodes - 1 { - return; - } - } - } - while let Libp2pEvent::Behaviour(BehaviourEvent::PeerSubscribed(_, topic)) = - publishing_node.next_event().await - { - // Publish on beacon block topic - if topic == TopicHash::from_raw(publishing_topic.clone()) { - subscribed_count += 1; - if subscribed_count == num_nodes - 1 { - publishing_node.swarm.publish(vec![pubsub_message.clone()]); - } - } - } - }; - tokio::select! { - _ = fut => {} - _ = tokio::time::delay_for(tokio::time::Duration::from_millis(800)) => { - panic!("Future timed out"); - } - } -} -*/ diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index ebdbb67421..656df0c4a1 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -155,10 +155,7 @@ fn test_blocks_by_range_chunked_rpc() { common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { - start_slot: 0, - count: messages_to_send, - }); + let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); let spec = E::default_spec(); @@ -282,10 +279,7 @@ fn test_blocks_by_range_over_limit() { common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { - start_slot: 0, - count: messages_to_send, - }); + let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); // BlocksByRange Response let full_block = 
merge_block_large(&common::fork_context(ForkName::Merge)); @@ -367,10 +361,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { - start_slot: 0, - count: messages_to_send, - }); + let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); // BlocksByRange Response let spec = E::default_spec(); @@ -490,10 +481,7 @@ fn test_blocks_by_range_single_empty_rpc() { common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { - start_slot: 0, - count: 10, - }); + let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10)); // BlocksByRange Response let spec = E::default_spec(); @@ -594,16 +582,15 @@ fn test_blocks_by_root_chunked_rpc() { common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest { - block_roots: VariableList::from(vec![ + let rpc_request = + Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![ Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), - ]), - }); + ]))); // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); @@ -722,8 +709,8 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest { - block_roots: VariableList::from(vec![ + let rpc_request = + Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![ Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), 
Hash256::from_low_u64_be(0), @@ -734,8 +721,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), - ]), - }); + ]))); // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index a234165d11..aa1827787c 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -22,7 +22,7 @@ slot_clock = { path = "../../common/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } hex = "0.4.2" ethereum_ssz = "0.5.0" -ssz_types = "0.5.0" +ssz_types = "0.5.3" futures = "0.3.7" error-chain = "0.12.4" tokio = { version = "1.14.0", features = ["full"] } @@ -46,8 +46,4 @@ derivative = "2.2.0" delay_map = "0.3.0" ethereum-types = { version = "0.14.1", optional = true } operation_pool = { path = "../operation_pool" } -execution_layer = { path = "../execution_layer" } - -[features] -deterministic_long_lived_attnets = [ "ethereum-types" ] -# default = ["deterministic_long_lived_attnets"] +execution_layer = { path = "../execution_layer" } \ No newline at end of file diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 9603205228..84d8e1b07a 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -54,6 +54,7 @@ use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use std::collections::VecDeque; use std::future::Future; +use std::path::PathBuf; use std::pin::Pin; use std::sync::{Arc, Weak}; use std::task::Context; @@ -749,6 +750,24 @@ impl std::convert::From> for WorkEvent { } } +pub struct BeaconProcessorSend(pub mpsc::Sender>); + +impl BeaconProcessorSend { + pub fn try_send(&self, message: WorkEvent) -> Result<(), Box>>> { + let work_type = message.work_type(); + match 
self.0.try_send(message) { + Ok(res) => Ok(res), + Err(e) => { + metrics::inc_counter_vec( + &metrics::BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE, + &[work_type], + ); + Err(Box::new(e)) + } + } + } +} + /// A consensus message (or multiple) from the network that requires processing. #[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] @@ -982,6 +1001,13 @@ impl Stream for InboundEvents { } } +/// Defines if and where we will store the SSZ files of invalid blocks. +#[derive(Clone)] +pub enum InvalidBlockStorage { + Enabled(PathBuf), + Disabled, +} + /// A mutli-threaded processor for messages received on the network /// that need to be processed by the `BeaconChain` /// @@ -995,6 +1021,7 @@ pub struct BeaconProcessor { pub max_workers: usize, pub current_workers: usize, pub importing_blocks: DuplicateCache, + pub invalid_block_storage: InvalidBlockStorage, pub log: Logger, } @@ -1676,19 +1703,23 @@ impl BeaconProcessor { peer_client, block, seen_timestamp, - } => task_spawner.spawn_async(async move { - worker - .process_gossip_block( - message_id, - peer_id, - peer_client, - block, - work_reprocessing_tx, - duplicate_cache, - seen_timestamp, - ) - .await - }), + } => { + let invalid_block_storage = self.invalid_block_storage.clone(); + task_spawner.spawn_async(async move { + worker + .process_gossip_block( + message_id, + peer_id, + peer_client, + block, + work_reprocessing_tx, + duplicate_cache, + invalid_block_storage, + seen_timestamp, + ) + .await + }) + } /* * Import for blocks that we received earlier than their intended slot. 
*/ @@ -1696,12 +1727,16 @@ impl BeaconProcessor { peer_id, block, seen_timestamp, - } => task_spawner.spawn_async(worker.process_gossip_verified_block( - peer_id, - *block, - work_reprocessing_tx, - seen_timestamp, - )), + } => { + let invalid_block_storage = self.invalid_block_storage.clone(); + task_spawner.spawn_async(worker.process_gossip_verified_block( + peer_id, + *block, + work_reprocessing_tx, + invalid_block_storage, + seen_timestamp, + )) + } /* * Voluntary exits received on gossip. */ diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 4b0a159eb4..b93e83ad78 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -203,6 +203,7 @@ impl TestRig { max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, importing_blocks: duplicate_cache.clone(), + invalid_block_storage: InvalidBlockStorage::Disabled, log: log.clone(), } .spawn_manager(beacon_processor_rx, Some(work_journal_tx)); diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 1ec03ae954..91ec81b18d 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -8,14 +8,17 @@ use beacon_chain::{ observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, - GossipVerifiedBlock, NotifyExecutionLayer, + BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, + NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use operation_pool::ReceivedPreCapella; -use slog::{crit, debug, error, info, trace, warn}; +use 
slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; +use std::fs; +use std::io::Write; +use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; @@ -34,7 +37,7 @@ use super::{ }, Worker, }; -use crate::beacon_processor::DuplicateCache; +use crate::beacon_processor::{DuplicateCache, InvalidBlockStorage}; /// Set to `true` to introduce stricter penalties for peers who send some types of late consensus /// messages. @@ -663,6 +666,7 @@ impl Worker { block: Arc>, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, + invalid_block_storage: InvalidBlockStorage, seen_duration: Duration, ) { if let Some(gossip_verified_block) = self @@ -683,6 +687,7 @@ impl Worker { peer_id, gossip_verified_block, reprocess_tx, + invalid_block_storage, seen_duration, ) .await; @@ -780,6 +785,20 @@ impl Worker { verified_block } + Err(e @ BlockError::Slashable) => { + warn!( + self.log, + "Received equivocating block from peer"; + "error" => ?e + ); + /* punish peer for submitting an equivocation, but not too harshly as honest peers may conceivably forward equivocating blocks to us from time to time */ + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "gossip_block_mid", + ); + return None; + } Err(BlockError::ParentUnknown(block)) => { debug!( self.log, @@ -801,7 +820,6 @@ impl Worker { Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) | Err(e @ BlockError::BlockIsAlreadyKnown) - | Err(e @ BlockError::RepeatProposal { .. }) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); @@ -830,7 +848,6 @@ impl Worker { | Err(e @ BlockError::NonLinearParentRoots) | Err(e @ BlockError::BlockIsNotLaterThanParent { .. 
}) | Err(e @ BlockError::InvalidSignature) - | Err(e @ BlockError::TooManySkippedSlots { .. }) | Err(e @ BlockError::WeakSubjectivityConflict) | Err(e @ BlockError::InconsistentFork(_)) | Err(e @ BlockError::ExecutionPayloadError(_)) @@ -935,28 +952,30 @@ impl Worker { peer_id: PeerId, verified_block: GossipVerifiedBlock, reprocess_tx: mpsc::Sender>, + invalid_block_storage: InvalidBlockStorage, // This value is not used presently, but it might come in handy for debugging. _seen_duration: Duration, ) { let block: Arc<_> = verified_block.block.clone(); let block_root = verified_block.block_root; - match self + let result = self .chain .process_block( block_root, verified_block, - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ) - .await - { + .await; + + match &result { Ok(block_root) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); if reprocess_tx .try_send(ReprocessQueueMessage::BlockImported { - block_root, + block_root: *block_root, parent_root: block.message().parent_root(), }) .is_err() @@ -986,7 +1005,11 @@ impl Worker { "Block with unknown parent attempted to be processed"; "peer_id" => %peer_id ); - self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root)); + self.send_sync_message(SyncMessage::UnknownBlock( + peer_id, + block.clone(), + block_root, + )); } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!( @@ -1015,6 +1038,16 @@ impl Worker { ); } }; + + if let Err(e) = &result { + self.maybe_store_invalid_block( + &invalid_block_storage, + block_root, + &block, + e, + &self.log, + ); + } } pub fn process_gossip_voluntary_exit( @@ -1720,7 +1753,7 @@ impl Worker { "attn_agg_not_in_committee", ); } - AttnError::AttestationAlreadyKnown { .. } => { + AttnError::AttestationSupersetKnown { .. } => { /* * The aggregate attestation has already been observed on the network or in * a block. 
@@ -2229,7 +2262,7 @@ impl Worker { "sync_bad_aggregator", ); } - SyncCommitteeError::SyncContributionAlreadyKnown(_) + SyncCommitteeError::SyncContributionSupersetKnown(_) | SyncCommitteeError::AggregatorAlreadyKnown(_) => { /* * The sync committee message already been observed on the network or in @@ -2322,6 +2355,25 @@ impl Worker { "peer_id" => %peer_id, "type" => ?message_type, ); + + // Do not penalize the peer. + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + + return; + } + SyncCommitteeError::PriorSyncContributionMessageKnown { .. } => { + /* + * We have already seen a sync contribution message from this validator for this epoch. + * + * The peer is not necessarily faulty. + */ + debug!( + self.log, + "Prior sync contribution message known"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); // We still penalize the peer slightly. We don't want this to be a recurring // behaviour. self.gossip_penalize_peer( @@ -2486,4 +2538,62 @@ impl Worker { self.propagate_if_timely(is_timely, message_id, peer_id) } + + /// Stores a block as a SSZ file, if and where `invalid_block_storage` dictates. + fn maybe_store_invalid_block( + &self, + invalid_block_storage: &InvalidBlockStorage, + block_root: Hash256, + block: &SignedBeaconBlock, + error: &BlockError, + log: &Logger, + ) { + if let InvalidBlockStorage::Enabled(base_dir) = invalid_block_storage { + let block_path = base_dir.join(format!("{}_{:?}.ssz", block.slot(), block_root)); + let error_path = base_dir.join(format!("{}_{:?}.error", block.slot(), block_root)); + + let write_file = |path: PathBuf, bytes: &[u8]| { + // No need to write the same file twice. For the error file, + // this means that we'll remember the first error message but + // forget the rest. + if path.exists() { + return; + } + + // Write to the file. + let write_result = fs::OpenOptions::new() + // Only succeed if the file doesn't already exist. We should + // have checked for this earlier. 
+ .create_new(true) + .write(true) + .open(&path) + .map_err(|e| format!("Failed to open file: {:?}", e)) + .map(|mut file| { + file.write_all(bytes) + .map_err(|e| format!("Failed to write file: {:?}", e)) + }); + if let Err(e) = write_result { + error!( + log, + "Failed to store invalid block/error"; + "error" => e, + "path" => ?path, + "root" => ?block_root, + "slot" => block.slot(), + ) + } else { + info!( + log, + "Stored invalid block/error "; + "path" => ?path, + "root" => ?block_root, + "slot" => block.slot(), + ) + } + }; + + write_file(block_path, &block.as_ssz_bytes()); + write_file(error_path, error.to_string().as_bytes()); + } + } } diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 81b163bf7e..83baa0417b 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -131,10 +131,10 @@ impl Worker { request_id: PeerRequestId, request: BlocksByRootRequest, ) { - let requested_blocks = request.block_roots.len(); + let requested_blocks = request.block_roots().len(); let mut block_stream = match self .chain - .get_blocks_checking_early_attester_cache(request.block_roots.into(), &executor) + .get_blocks_checking_early_attester_cache(request.block_roots().to_vec(), &executor) { Ok(block_stream) => block_stream, Err(e) => return error!(self.log, "Error getting block stream"; "error" => ?e), @@ -292,18 +292,18 @@ impl Worker { ) { debug!(self.log, "Received BlocksByRange Request"; "peer_id" => %peer_id, - "count" => req.count, - "start_slot" => req.start_slot, + "count" => req.count(), + "start_slot" => req.start_slot(), ); // Should not send more than max request blocks - if req.count > MAX_REQUEST_BLOCKS { - req.count = MAX_REQUEST_BLOCKS; + if *req.count() > MAX_REQUEST_BLOCKS { + *req.count_mut() = MAX_REQUEST_BLOCKS; } let forwards_block_root_iter = match self .chain - 
.forwards_iter_block_roots(Slot::from(req.start_slot)) + .forwards_iter_block_roots(Slot::from(*req.start_slot())) { Ok(iter) => iter, Err(BeaconChainError::HistoricalBlockError( @@ -326,18 +326,20 @@ impl Worker { // Pick out the required blocks, ignoring skip-slots. let mut last_block_root = None; let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .collect::>>() + iter.take_while(|(_, slot)| { + slot.as_u64() < req.start_slot().saturating_add(*req.count()) + }) + // map skip slots to None + .map(|(root, _)| { + let result = if Some(root) == last_block_root { + None + } else { + Some(root) + }; + last_block_root = Some(root); + result + }) + .collect::>>() }); let block_roots = match maybe_block_roots { @@ -364,8 +366,8 @@ impl Worker { Ok(Some(block)) => { // Due to skip slots, blocks could be out of the range, we ensure they // are in the range before sending - if block.slot() >= req.start_slot - && block.slot() < req.start_slot + req.count + if block.slot() >= *req.start_slot() + && block.slot() < req.start_slot() + req.count() { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { @@ -440,15 +442,15 @@ impl Worker { .slot() .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); - if blocks_sent < (req.count as usize) { + if blocks_sent < (*req.count() as usize) { debug!( self.log, "BlocksByRange outgoing response processed"; "peer" => %peer_id, "msg" => "Failed to return all requested blocks", - "start_slot" => req.start_slot, + "start_slot" => req.start_slot(), "current_slot" => current_slot, - "requested" => req.count, + "requested" => req.count(), "returned" => blocks_sent ); } else { @@ -456,9 +458,9 @@ impl Worker { self.log, "BlocksByRange 
outgoing response processed"; "peer" => %peer_id, - "start_slot" => req.start_slot, + "start_slot" => req.start_slot(), "current_slot" => current_slot, - "requested" => req.count, + "requested" => req.count(), "returned" => blocks_sent ); } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index ca2095348a..ac59b1daa9 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -7,7 +7,6 @@ use crate::beacon_processor::DuplicateCache; use crate::metrics; use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; -use beacon_chain::CountUnrealized; use beacon_chain::{ observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, @@ -25,7 +24,7 @@ use types::{Epoch, Hash256, SignedBeaconBlock}; #[derive(Clone, Debug, PartialEq)] pub enum ChainSegmentProcessId { /// Processing Id of a range syncing batch. - RangeBatchId(ChainId, Epoch, CountUnrealized), + RangeBatchId(ChainId, Epoch), /// Processing ID for a backfill syncing batch. BackSyncBatchId(Epoch), /// Processing Id of the parent lookup of a block. @@ -99,15 +98,15 @@ impl Worker { }); // Checks if a block from this proposer is already known. - let proposal_already_known = || { + let block_equivocates = || { match self .chain .observed_block_producers .read() - .proposer_has_been_observed(block.message()) + .proposer_has_been_observed(block.message(), block.canonical_root()) { - Ok(is_observed) => is_observed, - // Both of these blocks will be rejected, so reject them now rather + Ok(seen_status) => seen_status.is_slashable(), + //Both of these blocks will be rejected, so reject them now rather // than re-queuing them. Err(ObserveError::FinalizedBlock { .. 
}) | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, @@ -117,7 +116,11 @@ impl Worker { // If we've already seen a block from this proposer *and* the block // arrived before the attestation deadline, requeue it to ensure it is // imported late enough that it won't receive a proposer boost. - if !block_is_late && proposal_already_known() { + // + // Don't requeue blocks if they're already known to fork choice, just + // push them through to block processing so they can be handled through + // the normal channels. + if !block_is_late && block_equivocates() { debug!( self.log, "Delaying processing of duplicate RPC block"; @@ -150,12 +153,7 @@ impl Worker { let parent_root = block.message().parent_root(); let result = self .chain - .process_block( - block_root, - block, - CountUnrealized::True, - NotifyExecutionLayer::Yes, - ) + .process_block(block_root, block, NotifyExecutionLayer::Yes, || Ok(())) .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -204,17 +202,13 @@ impl Worker { ) { let result = match sync_type { // this a request from the range sync - ChainSegmentProcessId::RangeBatchId(chain_id, epoch, count_unrealized) => { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); match self - .process_blocks( - downloaded_blocks.iter(), - count_unrealized, - notify_execution_layer, - ) + .process_blocks(downloaded_blocks.iter(), notify_execution_layer) .await { (_, Ok(_)) => { @@ -293,11 +287,7 @@ impl Worker { // parent blocks are ordered from highest slot to lowest, so we need to process in // reverse match self - .process_blocks( - downloaded_blocks.iter().rev(), - CountUnrealized::True, - notify_execution_layer, - ) + .process_blocks(downloaded_blocks.iter().rev(), notify_execution_layer) .await { (imported_blocks, Err(e)) => { @@ 
-327,13 +317,12 @@ impl Worker { async fn process_blocks<'a>( &self, downloaded_blocks: impl Iterator>>, - count_unrealized: CountUnrealized, notify_execution_layer: NotifyExecutionLayer, ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec> = downloaded_blocks.cloned().collect(); match self .chain - .process_chain_segment(blocks, count_unrealized, notify_execution_layer) + .process_chain_segment(blocks, notify_execution_layer) .await { ChainSegmentResult::Successful { imported_blocks } => { diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 09caaaa11e..27d7dc9625 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -279,6 +279,12 @@ lazy_static! { "Gossipsub light_client_optimistic_update errors per error type", &["type"] ); + pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result = + try_create_int_counter_vec( + "beacon_processor_send_error_per_work_type", + "Total number of beacon processor send error per work type", + &["type"] + ); /* diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 7f75a27fe2..7a91f2d0b1 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -6,7 +6,8 @@ #![allow(clippy::unit_arg)] use crate::beacon_processor::{ - BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, + BeaconProcessor, BeaconProcessorSend, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, + MAX_WORK_EVENT_QUEUE_LEN, }; use crate::error; use crate::service::{NetworkMessage, RequestId}; @@ -19,6 +20,7 @@ use lighthouse_network::rpc::*; use lighthouse_network::{ MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, }; +use logging::TimeLatch; use slog::{debug, o, trace}; use slog::{error, warn}; use std::cmp; @@ -39,9 +41,11 @@ pub struct Router { /// A network context to return and handle RPC requests. 
network: HandlerNetworkContext, /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: BeaconProcessorSend, /// The `Router` logger. log: slog::Logger, + /// Provides de-bounce functionality for logging. + logger_debounce: TimeLatch, } /// Types of messages the router can receive. @@ -81,6 +85,7 @@ impl Router { network_globals: Arc>, network_send: mpsc::UnboundedSender>, executor: task_executor::TaskExecutor, + invalid_block_storage: InvalidBlockStorage, log: slog::Logger, ) -> error::Result>> { let message_handler_log = log.new(o!("service"=> "router")); @@ -99,7 +104,7 @@ impl Router { beacon_chain.clone(), network_globals.clone(), network_send.clone(), - beacon_processor_send.clone(), + BeaconProcessorSend(beacon_processor_send.clone()), sync_logger, ); @@ -112,6 +117,7 @@ impl Router { max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, importing_blocks: Default::default(), + invalid_block_storage, log: log.clone(), } .spawn_manager(beacon_processor_receive, None); @@ -122,8 +128,9 @@ impl Router { chain: beacon_chain, sync_send, network: HandlerNetworkContext::new(network_send, log.clone()), - beacon_processor_send, + beacon_processor_send: BeaconProcessorSend(beacon_processor_send), log: message_handler_log, + logger_debounce: TimeLatch::default(), }; // spawn handler task and move the message handler instance into the spawned thread @@ -477,12 +484,15 @@ impl Router { self.beacon_processor_send .try_send(work) .unwrap_or_else(|e| { - let work_type = match &e { + let work_type = match &*e { mpsc::error::TrySendError::Closed(work) | mpsc::error::TrySendError::Full(work) => work.work_type(), }; - error!(&self.log, "Unable to send message to the beacon processor"; - "error" => %e, "type" => work_type) + + if self.logger_debounce.elapsed() { + error!(&self.log, "Unable to send message to the beacon processor"; + "error" => %e, "type" => work_type) + } }) 
} } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index d630cf9c39..2c919233fc 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,4 +1,5 @@ use super::sync::manager::RequestId as SyncId; +use crate::beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; use crate::subnet_service::SyncCommitteeService; @@ -295,6 +296,12 @@ impl NetworkService { } } + let invalid_block_storage = config + .invalid_block_storage + .clone() + .map(InvalidBlockStorage::Enabled) + .unwrap_or(InvalidBlockStorage::Disabled); + // launch derived network services // router task @@ -303,14 +310,14 @@ impl NetworkService { network_globals.clone(), network_senders.network_send(), executor.clone(), + invalid_block_storage, network_log.clone(), )?; // attestation subnet service let attestation_service = AttestationService::new( beacon_chain.clone(), - #[cfg(feature = "deterministic_long_lived_attnets")] - network_globals.local_enr().node_id().raw().into(), + network_globals.local_enr().node_id(), config, &network_log, ); diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index e46a52cfb2..b4f52df39d 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -3,7 +3,6 @@ //! determines whether attestations should be aggregated and/or passed to the beacon node. 
use super::SubnetServiceMessage; -#[cfg(any(test, feature = "deterministic_long_lived_attnets"))] use std::collections::HashSet; use std::collections::{HashMap, VecDeque}; use std::pin::Pin; @@ -14,10 +13,8 @@ use std::time::Duration; use beacon_chain::{BeaconChain, BeaconChainTypes}; use delay_map::{HashMapDelay, HashSetDelay}; use futures::prelude::*; -use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; -#[cfg(not(feature = "deterministic_long_lived_attnets"))] -use rand::seq::SliceRandom; -use slog::{debug, error, o, trace, warn}; +use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDiscovery}; +use slog::{debug, error, info, o, trace, warn}; use slot_clock::SlotClock; use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; @@ -27,10 +24,6 @@ use crate::metrics; /// slot is less than this number, skip the peer discovery process. /// Subnet discovery query takes at most 30 secs, 2 slots take 24s. pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; -/// The time (in slots) before a last seen validator is considered absent and we unsubscribe from -/// the random gossip topics that we subscribed to due to the validator connection. -#[cfg(not(feature = "deterministic_long_lived_attnets"))] -const LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS: u32 = 150; /// The fraction of a slot that we subscribe to a subnet before the required slot. /// /// Currently a whole slot ahead. @@ -67,30 +60,23 @@ pub struct AttestationService { /// Subnets we are currently subscribed to as short lived subscriptions. /// /// Once they expire, we unsubscribe from these. + /// We subscribe to subnets when we are an aggregator for an exact subnet. short_lived_subscriptions: HashMapDelay, /// Subnets we are currently subscribed to as long lived subscriptions. /// /// We advertise these in our ENR. When these expire, the subnet is removed from our ENR. 
- #[cfg(feature = "deterministic_long_lived_attnets")] + /// These are required of all beacon nodes. The exact number is determined by the chain + /// specification. long_lived_subscriptions: HashSet, - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - long_lived_subscriptions: HashMapDelay, - /// Short lived subscriptions that need to be done in the future. + /// Short lived subscriptions that need to be executed in the future. scheduled_short_lived_subscriptions: HashSetDelay, /// A collection timeouts to track the existence of aggregate validator subscriptions at an /// `ExactSubnet`. aggregate_validators_on_subnet: Option>, - /// A collection of seen validators. These dictate how many random subnets we should be - /// subscribed to. As these time out, we unsubscribe for the required random subnets and update - /// our ENR. - /// This is a set of validator indices. - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - known_validators: HashSetDelay, - /// The waker for the current thread. waker: Option, @@ -100,16 +86,10 @@ pub struct AttestationService { /// We are always subscribed to all subnets. subscribe_all_subnets: bool, - /// For how many slots we subscribe to long lived subnets. - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - long_lived_subnet_subscription_slots: u64, - /// Our Discv5 node_id. - #[cfg(feature = "deterministic_long_lived_attnets")] - node_id: ethereum_types::U256, + node_id: NodeId, /// Future used to manage subscribing and unsubscribing from long lived subnets. - #[cfg(feature = "deterministic_long_lived_attnets")] next_long_lived_subscription_event: Pin>, /// Whether this node is a block proposer-only node. @@ -122,62 +102,22 @@ pub struct AttestationService { impl AttestationService { /* Public functions */ - #[cfg(not(feature = "deterministic_long_lived_attnets"))] + /// Establish the service based on the passed configuration. 
pub fn new( beacon_chain: Arc>, + node_id: NodeId, config: &NetworkConfig, log: &slog::Logger, ) -> Self { let log = log.new(o!("service" => "attestation_service")); - // Calculate the random subnet duration from the spec constants. - let spec = &beacon_chain.spec; let slot_duration = beacon_chain.slot_clock.slot_duration(); - let long_lived_subnet_subscription_slots = spec - .epochs_per_random_subnet_subscription - .saturating_mul(T::EthSpec::slots_per_epoch()); - let long_lived_subscription_duration = Duration::from_millis( - slot_duration.as_millis() as u64 * long_lived_subnet_subscription_slots, - ); - // Panics on overflow. Ensure LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS is not too large. - let last_seen_val_timeout = slot_duration - .checked_mul(LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS) - .expect("LAST_SEEN_VALIDATOR_TIMEOUT must not be ridiculously large"); - - let track_validators = !config.import_all_attestations; - let aggregate_validators_on_subnet = - track_validators.then(|| HashSetDelay::new(slot_duration)); - AttestationService { - events: VecDeque::with_capacity(10), - beacon_chain, - short_lived_subscriptions: HashMapDelay::new(slot_duration), - long_lived_subscriptions: HashMapDelay::new(long_lived_subscription_duration), - scheduled_short_lived_subscriptions: HashSetDelay::default(), - aggregate_validators_on_subnet, - known_validators: HashSetDelay::new(last_seen_val_timeout), - waker: None, - discovery_disabled: config.disable_discovery, - proposer_only: config.proposer_only, - subscribe_all_subnets: config.subscribe_all_subnets, - long_lived_subnet_subscription_slots, - log, + if config.subscribe_all_subnets { + slog::info!(log, "Subscribing to all subnets"); + } else { + slog::info!(log, "Deterministic long lived subnets enabled"; "subnets_per_node" => beacon_chain.spec.subnets_per_node, "subscription_duration_in_epochs" => beacon_chain.spec.epochs_per_subnet_subscription); } - } - - #[cfg(feature = "deterministic_long_lived_attnets")] - pub fn new( - 
beacon_chain: Arc>, - node_id: ethereum_types::U256, - config: &NetworkConfig, - log: &slog::Logger, - ) -> Self { - let log = log.new(o!("service" => "attestation_service")); - - // Calculate the random subnet duration from the spec constants. - let slot_duration = beacon_chain.slot_clock.slot_duration(); - - slog::info!(log, "Deterministic long lived subnets enabled"; "subnets_per_node" => beacon_chain.spec.subnets_per_node); let track_validators = !config.import_all_attestations; let aggregate_validators_on_subnet = @@ -198,9 +138,15 @@ impl AttestationService { // value with a smarter timing Box::pin(tokio::time::sleep(Duration::from_secs(1))) }, + proposer_only: config.proposer_only, log, }; - service.recompute_long_lived_subnets(); + + // If we are not subscribed to all subnets, handle the deterministic set of subnets + if !config.subscribe_all_subnets { + service.recompute_long_lived_subnets(); + } + service } @@ -210,20 +156,12 @@ impl AttestationService { if self.subscribe_all_subnets { self.beacon_chain.spec.attestation_subnet_count as usize } else { - #[cfg(feature = "deterministic_long_lived_attnets")] let count = self .short_lived_subscriptions .keys() .chain(self.long_lived_subscriptions.iter()) .collect::>() .len(); - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - let count = self - .short_lived_subscriptions - .keys() - .chain(self.long_lived_subscriptions.keys()) - .collect::>() - .len(); count } } @@ -236,20 +174,20 @@ impl AttestationService { subscription_kind: SubscriptionKind, ) -> bool { match subscription_kind { - #[cfg(feature = "deterministic_long_lived_attnets")] SubscriptionKind::LongLived => self.long_lived_subscriptions.contains(subnet_id), - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - SubscriptionKind::LongLived => self.long_lived_subscriptions.contains_key(subnet_id), SubscriptionKind::ShortLived => self.short_lived_subscriptions.contains_key(subnet_id), } } + #[cfg(test)] + pub(crate) fn 
long_lived_subscriptions(&self) -> &HashSet { + &self.long_lived_subscriptions + } + /// Processes a list of validator subscriptions. /// /// This will: /// - Register new validators as being known. - /// - Subscribe to the required number of random subnets. - /// - Update the local ENR for new random subnets due to seeing new validators. /// - Search for peers for required subnets. /// - Request subscriptions for subnets on specific slots when required. /// - Build the timeouts for each of these events. @@ -267,18 +205,17 @@ impl AttestationService { // Maps each subnet_id subscription to it's highest slot let mut subnets_to_discover: HashMap = HashMap::new(); + + // Registers the validator with the attestation service. for subscription in subscriptions { metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS); - // Registers the validator with the attestation service. - // This will subscribe to long-lived random subnets if required. trace!(self.log, "Validator subscription"; "subscription" => ?subscription, ); - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - self.add_known_validator(subscription.validator_index); + // Compute the subnet that is associated with this subscription let subnet_id = match SubnetId::compute_subnet::( subscription.slot, subscription.attestation_committee_index, @@ -316,7 +253,7 @@ impl AttestationService { if subscription.is_aggregator { metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS); - if let Err(e) = self.subscribe_to_subnet(exact_subnet) { + if let Err(e) = self.subscribe_to_short_lived_subnet(exact_subnet) { warn!(self.log, "Subscription to subnet error"; "error" => e, @@ -347,14 +284,13 @@ impl AttestationService { Ok(()) } - #[cfg(feature = "deterministic_long_lived_attnets")] fn recompute_long_lived_subnets(&mut self) { // Ensure the next computation is scheduled even if assigning subnets fails. 
let next_subscription_event = self .recompute_long_lived_subnets_inner() .unwrap_or_else(|_| self.beacon_chain.slot_clock.slot_duration()); - debug!(self.log, "Recomputing deterministic long lived attnets"); + debug!(self.log, "Recomputing deterministic long lived subnets"); self.next_long_lived_subscription_event = Box::pin(tokio::time::sleep(next_subscription_event)); @@ -365,14 +301,13 @@ impl AttestationService { /// Gets the long lived subnets the node should be subscribed to during the current epoch and /// the remaining duration for which they remain valid. - #[cfg(feature = "deterministic_long_lived_attnets")] fn recompute_long_lived_subnets_inner(&mut self) -> Result { let current_epoch = self.beacon_chain.epoch().map_err( |e| error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e), )?; let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( - self.node_id, + self.node_id.raw().into(), current_epoch, &self.beacon_chain.spec, ) @@ -396,17 +331,12 @@ impl AttestationService { Ok(next_subscription_event) } - #[cfg(all(test, feature = "deterministic_long_lived_attnets"))] - pub fn update_long_lived_subnets_testing(&mut self, subnets: HashSet) { - self.update_long_lived_subnets(subnets) - } - /// Updates the long lived subnets. /// /// New subnets are registered as subscribed, removed subnets as unsubscribed and the Enr /// updated accordingly. - #[cfg(feature = "deterministic_long_lived_attnets")] fn update_long_lived_subnets(&mut self, mut subnets: HashSet) { + info!(self.log, "Subscribing to long-lived subnets"; "subnets" => ?subnets.iter().collect::>()); for subnet in &subnets { // Add the events for those subnets that are new as long lived subscriptions. 
if !self.long_lived_subscriptions.contains(subnet) { @@ -430,28 +360,15 @@ impl AttestationService { } } - // Check for subnets that are being removed + // Update the long_lived_subnets set and check for subnets that are being removed std::mem::swap(&mut self.long_lived_subscriptions, &mut subnets); for subnet in subnets { if !self.long_lived_subscriptions.contains(&subnet) { - if !self.short_lived_subscriptions.contains_key(&subnet) { - debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet, "subscription_kind" => ?SubscriptionKind::LongLived); - self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - subnet, - ))); - } - - self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation(subnet))); + self.handle_removed_subnet(subnet, SubscriptionKind::LongLived); } } } - /// Overwrites the long lived subscriptions for testing. - #[cfg(all(test, feature = "deterministic_long_lived_attnets"))] - pub fn set_long_lived_subscriptions(&mut self, subnets: HashSet) { - self.long_lived_subscriptions = subnets - } - /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip /// verification, re-propagates and returns false. pub fn should_process_attestation( @@ -535,7 +452,7 @@ impl AttestationService { } // Subscribes to the subnet if it should be done immediately, or schedules it if required. - fn subscribe_to_subnet( + fn subscribe_to_short_lived_subnet( &mut self, ExactSubnet { subnet_id, slot }: ExactSubnet, ) -> Result<(), &'static str> { @@ -564,12 +481,7 @@ impl AttestationService { // immediately. if time_to_subscription_start.is_zero() { // This is a current or past slot, we subscribe immediately. - self.subscribe_to_subnet_immediately( - subnet_id, - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - SubscriptionKind::ShortLived, - slot + 1, - )?; + self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1)?; } else { // This is a future slot, schedule subscribing. 
trace!(self.log, "Scheduling subnet subscription"; "subnet" => ?subnet_id, "time_to_subscription_start" => ?time_to_subscription_start); @@ -580,79 +492,6 @@ impl AttestationService { Ok(()) } - /// Updates the `known_validators` mapping and subscribes to long lived subnets if required. - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - fn add_known_validator(&mut self, validator_index: u64) { - let previously_known = self.known_validators.contains_key(&validator_index); - // Add the new validator or update the current timeout for a known validator. - self.known_validators.insert(validator_index); - if !previously_known { - // New validator has subscribed. - // Subscribe to random topics and update the ENR if needed. - self.subscribe_to_random_subnets(); - } - } - - /// Subscribe to long-lived random subnets and update the local ENR bitfield. - /// The number of subnets to subscribe depends on the number of active validators and number of - /// current subscriptions. - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - fn subscribe_to_random_subnets(&mut self) { - if self.subscribe_all_subnets { - // This case is not handled by this service. - return; - } - - let max_subnets = self.beacon_chain.spec.attestation_subnet_count; - // Calculate how many subnets we need, - let required_long_lived_subnets = { - let subnets_for_validators = self - .known_validators - .len() - .saturating_mul(self.beacon_chain.spec.random_subnets_per_validator as usize); - subnets_for_validators // How many subnets we need - .min(max_subnets as usize) // Capped by the max - .saturating_sub(self.long_lived_subscriptions.len()) // Minus those we have - }; - - if required_long_lived_subnets == 0 { - // Nothing to do. - return; - } - - // Build a list of the subnets that we are not currently advertising. 
- let available_subnets = (0..max_subnets) - .map(SubnetId::new) - .filter(|subnet_id| !self.long_lived_subscriptions.contains_key(subnet_id)) - .collect::>(); - - let subnets_to_subscribe: Vec<_> = available_subnets - .choose_multiple(&mut rand::thread_rng(), required_long_lived_subnets) - .cloned() - .collect(); - - // Calculate in which slot does this subscription end. - let end_slot = match self.beacon_chain.slot_clock.now() { - Some(slot) => slot + self.long_lived_subnet_subscription_slots, - None => { - return debug!( - self.log, - "Failed to calculate end slot of long lived subnet subscriptions." - ) - } - }; - - for subnet_id in &subnets_to_subscribe { - if let Err(e) = self.subscribe_to_subnet_immediately( - *subnet_id, - SubscriptionKind::LongLived, - end_slot, - ) { - debug!(self.log, "Failed to subscribe to long lived subnet"; "subnet" => ?subnet_id, "err" => e); - } - } - } - /* A collection of functions that handle the various timeouts */ /// Registers a subnet as subscribed. @@ -662,11 +501,9 @@ impl AttestationService { /// out the appropriate events. /// /// On determinist long lived subnets, this is only used for short lived subscriptions. - fn subscribe_to_subnet_immediately( + fn subscribe_to_short_lived_subnet_immediately( &mut self, subnet_id: SubnetId, - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - subscription_kind: SubscriptionKind, end_slot: Slot, ) -> Result<(), &'static str> { if self.subscribe_all_subnets { @@ -685,25 +522,12 @@ impl AttestationService { return Err("Time when subscription would end has already passed."); } - #[cfg(feature = "deterministic_long_lived_attnets")] let subscription_kind = SubscriptionKind::ShortLived; // We need to check and add a subscription for the right kind, regardless of the presence // of the subnet as a subscription of the other kind. This is mainly since long lived // subscriptions can be removed at any time when a validator goes offline. 
- #[cfg(not(feature = "deterministic_long_lived_attnets"))] - let (subscriptions, already_subscribed_as_other_kind) = match subscription_kind { - SubscriptionKind::ShortLived => ( - &mut self.short_lived_subscriptions, - self.long_lived_subscriptions.contains_key(&subnet_id), - ), - SubscriptionKind::LongLived => ( - &mut self.long_lived_subscriptions, - self.short_lived_subscriptions.contains_key(&subnet_id), - ), - }; - #[cfg(feature = "deterministic_long_lived_attnets")] let (subscriptions, already_subscribed_as_other_kind) = ( &mut self.short_lived_subscriptions, self.long_lived_subscriptions.contains(&subnet_id), @@ -738,57 +562,19 @@ impl AttestationService { subnet_id, ))); } - - // If this is a new long lived subscription, send out the appropriate events. - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - if SubscriptionKind::LongLived == subscription_kind { - let subnet = Subnet::Attestation(subnet_id); - // Advertise this subnet in our ENR. - self.long_lived_subscriptions.insert_at( - subnet_id, - end_slot, - time_to_subscription_end, - ); - self.queue_event(SubnetServiceMessage::EnrAdd(subnet)); - - if !self.discovery_disabled { - self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![ - SubnetDiscovery { - subnet, - min_ttl: None, - }, - ])) - } - } } } Ok(()) } - /// A random subnet has expired. - /// - /// This function selects a new subnet to join, or extends the expiry if there are no more - /// available subnets to choose from. - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId) { - self.handle_removed_subnet(subnet_id, SubscriptionKind::LongLived); - - // Remove the ENR bitfield bit and choose a new random on from the available subnets - // Subscribe to a new random subnet. - self.subscribe_to_random_subnets(); - } - // Unsubscribes from a subnet that was removed if it does not continue to exist as a // subscription of the other kind. 
For long lived subscriptions, it also removes the // advertisement from our ENR. fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) { let exists_in_other_subscriptions = match subscription_kind { SubscriptionKind::LongLived => self.short_lived_subscriptions.contains_key(&subnet_id), - #[cfg(feature = "deterministic_long_lived_attnets")] SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains(&subnet_id), - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains_key(&subnet_id), }; if !exists_in_other_subscriptions { @@ -806,48 +592,6 @@ impl AttestationService { ))); } } - - /// A known validator has not sent a subscription in a while. They are considered offline and the - /// beacon node no longer needs to be subscribed to the allocated random subnets. - /// - /// We don't keep track of a specific validator to random subnet, rather the ratio of active - /// validators to random subnets. So when a validator goes offline, we can simply remove the - /// allocated amount of random subnets. - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - fn handle_known_validator_expiry(&mut self) { - // Calculate how many subnets should we remove. 
- let extra_subnet_count = { - let max_subnets = self.beacon_chain.spec.attestation_subnet_count; - let subnets_for_validators = self - .known_validators - .len() - .saturating_mul(self.beacon_chain.spec.random_subnets_per_validator as usize) - .min(max_subnets as usize); - - self.long_lived_subscriptions - .len() - .saturating_sub(subnets_for_validators) - }; - - if extra_subnet_count == 0 { - // Nothing to do - return; - } - - let advertised_subnets = self - .long_lived_subscriptions - .keys() - .cloned() - .collect::>(); - let to_remove_subnets = advertised_subnets - .choose_multiple(&mut rand::thread_rng(), extra_subnet_count) - .cloned(); - - for subnet_id in to_remove_subnets { - self.long_lived_subscriptions.remove(&subnet_id); - self.handle_removed_subnet(subnet_id, SubscriptionKind::LongLived); - } - } } impl Stream for AttestationService { @@ -868,37 +612,34 @@ impl Stream for AttestationService { return Poll::Ready(Some(event)); } - // Process first any known validator expiries, since these affect how many long lived - // subnets we need. 
- #[cfg(not(feature = "deterministic_long_lived_attnets"))] - match self.known_validators.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(_validator_index))) => { - self.handle_known_validator_expiry(); + // If we aren't subscribed to all subnets, handle the deterministic long-lived subnets + if !self.subscribe_all_subnets { + match self.next_long_lived_subscription_event.as_mut().poll(cx) { + Poll::Ready(_) => { + self.recompute_long_lived_subnets(); + // We re-wake the task as there could be other subscriptions to process + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); + } + Poll::Pending => {} } - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - - #[cfg(feature = "deterministic_long_lived_attnets")] - match self.next_long_lived_subscription_event.as_mut().poll(cx) { - Poll::Ready(_) => self.recompute_long_lived_subnets(), - Poll::Pending => {} } // Process scheduled subscriptions that might be ready, since those can extend a soon to // expire subscription. 
match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) { Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => { - if let Err(e) = self.subscribe_to_subnet_immediately( - subnet_id, - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - SubscriptionKind::ShortLived, - slot + 1, - ) { + if let Err(e) = + self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1) + { debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet_id, "err" => e); } + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); } Poll::Ready(Some(Err(e))) => { error!(self.log, "Failed to check for scheduled subnet subscriptions"; "error"=> e); @@ -910,6 +651,11 @@ impl Stream for AttestationService { match self.short_lived_subscriptions.poll_next_unpin(cx) { Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { self.handle_removed_subnet(subnet_id, SubscriptionKind::ShortLived); + // We re-wake the task as there could be other subscriptions to process + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); } Poll::Ready(Some(Err(e))) => { error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); @@ -917,18 +663,6 @@ impl Stream for AttestationService { Poll::Ready(None) | Poll::Pending => {} } - // Process any random subnet expiries. - #[cfg(not(feature = "deterministic_long_lived_attnets"))] - match self.long_lived_subscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { - self.handle_random_subnet_expiry(subnet_id) - } - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - // Poll to remove entries on expiration, no need to act on expiration events. 
if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index a407fe1bcf..3b8c89a442 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -126,10 +126,7 @@ fn get_attestation_service( AttestationService::new( beacon_chain, - #[cfg(feature = "deterministic_long_lived_attnets")] - lighthouse_network::discv5::enr::NodeId::random() - .raw() - .into(), + lighthouse_network::discv5::enr::NodeId::random(), &config, &log, ) @@ -179,9 +176,6 @@ async fn get_events + Unpin>( mod attestation_service { - #[cfg(feature = "deterministic_long_lived_attnets")] - use std::collections::HashSet; - #[cfg(not(windows))] use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; @@ -192,8 +186,8 @@ mod attestation_service { attestation_committee_index: CommitteeIndex, slot: Slot, committee_count_at_slot: u64, + is_aggregator: bool, ) -> ValidatorSubscription { - let is_aggregator = true; ValidatorSubscription { validator_index, attestation_committee_index, @@ -203,11 +197,11 @@ mod attestation_service { } } - #[cfg(not(feature = "deterministic_long_lived_attnets"))] fn get_subscriptions( validator_count: u64, slot: Slot, committee_count_at_slot: u64, + is_aggregator: bool, ) -> Vec { (0..validator_count) .map(|validator_index| { @@ -216,6 +210,7 @@ mod attestation_service { validator_index, slot, committee_count_at_slot, + is_aggregator, ) }) .collect() @@ -229,6 +224,7 @@ mod attestation_service { // Keep a low subscription slot so that there are no additional subnet discovery events. 
let subscription_slot = 0; let committee_count = 1; + let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // create the attestation service and subscriptions let mut attestation_service = get_attestation_service(None); @@ -243,6 +239,7 @@ mod attestation_service { committee_index, current_slot + Slot::new(subscription_slot), committee_count, + true, )]; // submit the subscriptions @@ -266,16 +263,19 @@ mod attestation_service { // Wait for 1 slot duration to get the unsubscription event let events = get_events( &mut attestation_service, - Some(5), + Some(subnets_per_node * 3 + 2), (MainnetEthSpec::slots_per_epoch() * 3) as u32, ) .await; matches::assert_matches!( - events[..3], + events[..6], [ SubnetServiceMessage::Subscribe(_any1), SubnetServiceMessage::EnrAdd(_any3), SubnetServiceMessage::DiscoverPeers(_), + SubnetServiceMessage::Subscribe(_), + SubnetServiceMessage::EnrAdd(_), + SubnetServiceMessage::DiscoverPeers(_), ] ); @@ -284,10 +284,10 @@ mod attestation_service { if !attestation_service .is_subscribed(&subnet_id, attestation_subnets::SubscriptionKind::LongLived) { - assert_eq!(expected[..], events[3..]); + assert_eq!(expected[..], events[subnets_per_node * 3..]); } - // Should be subscribed to only 1 long lived subnet after unsubscription. - assert_eq!(attestation_service.subscription_count(), 1); + // Should be subscribed to only subnets_per_node long lived subnet after unsubscription. + assert_eq!(attestation_service.subscription_count(), subnets_per_node); } /// Test to verify that we are not unsubscribing to a subnet before a required subscription. @@ -297,6 +297,7 @@ mod attestation_service { // subscription config let validator_index = 1; let committee_count = 1; + let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // Makes 2 validator subscriptions to the same subnet but at different slots. // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). 
@@ -318,6 +319,7 @@ mod attestation_service { com1, current_slot + Slot::new(subscription_slot1), committee_count, + true, ); let sub2 = get_subscription( @@ -325,6 +327,7 @@ mod attestation_service { com2, current_slot + Slot::new(subscription_slot2), committee_count, + true, ); let subnet_id1 = SubnetId::compute_subnet::( @@ -366,16 +369,22 @@ mod attestation_service { let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); - // Should be still subscribed to 1 long lived and 1 short lived subnet if both are + // Should be still subscribed to 2 long lived and up to 1 short lived subnet if both are // different. if !attestation_service.is_subscribed( &subnet_id1, attestation_subnets::SubscriptionKind::LongLived, ) { - assert_eq!(expected, events[3]); - assert_eq!(attestation_service.subscription_count(), 2); + // The index is 3*subnets_per_node (because we subscribe + discover + enr per long lived + // subnet) + 1 + let index = 3 * subnets_per_node; + assert_eq!(expected, events[index]); + assert_eq!( + attestation_service.subscription_count(), + subnets_per_node + 1 + ); } else { - assert_eq!(attestation_service.subscription_count(), 1); + assert!(attestation_service.subscription_count() == subnets_per_node); } // Get event for 1 more slot duration, we should get the unsubscribe event now. @@ -395,17 +404,17 @@ mod attestation_service { ); } - // Should be subscribed to only 1 long lived subnet after unsubscription. - assert_eq!(attestation_service.subscription_count(), 1); + // Should be subscribed 2 long lived subnet after unsubscription. 
+ assert_eq!(attestation_service.subscription_count(), subnets_per_node); } - #[cfg(not(feature = "deterministic_long_lived_attnets"))] #[tokio::test] - async fn subscribe_all_random_subnets() { + async fn subscribe_all_subnets() { let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; - let subscription_slot = 10; + let subscription_slot = 3; let subscription_count = attestation_subnet_count; let committee_count = 1; + let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // create the attestation service and subscriptions let mut attestation_service = get_attestation_service(None); @@ -419,6 +428,7 @@ mod attestation_service { subscription_count, current_slot + subscription_slot, committee_count, + true, ); // submit the subscriptions @@ -426,42 +436,52 @@ mod attestation_service { .validator_subscriptions(subscriptions) .unwrap(); - let events = get_events(&mut attestation_service, None, 3).await; + let events = get_events(&mut attestation_service, Some(131), 10).await; let mut discover_peer_count = 0; let mut enr_add_count = 0; let mut unexpected_msg_count = 0; + let mut unsubscribe_event_count = 0; for event in &events { match event { SubnetServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, SubnetServiceMessage::Subscribe(_any_subnet) => {} SubnetServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, + SubnetServiceMessage::Unsubscribe(_) => unsubscribe_event_count += 1, _ => unexpected_msg_count += 1, } } + // There should be a Subscribe Event, and Enr Add event and a DiscoverPeers event for each + // long-lived subnet initially. The next event should be a bulk discovery event. 
+ let bulk_discovery_index = 3 * subnets_per_node; // The bulk discovery request length should be equal to validator_count - let bulk_discovery_event = events.last().unwrap(); + let bulk_discovery_event = &events[bulk_discovery_index]; if let SubnetServiceMessage::DiscoverPeers(d) = bulk_discovery_event { assert_eq!(d.len(), attestation_subnet_count as usize); } else { panic!("Unexpected event {:?}", bulk_discovery_event); } - // 64 `DiscoverPeer` requests of length 1 corresponding to random subnets + // 64 `DiscoverPeer` requests of length 1 corresponding to deterministic subnets // and 1 `DiscoverPeer` request corresponding to bulk subnet discovery. - assert_eq!(discover_peer_count, subscription_count + 1); - assert_eq!(attestation_service.subscription_count(), 64); - assert_eq!(enr_add_count, 64); + assert_eq!(discover_peer_count, subnets_per_node + 1); + assert_eq!(attestation_service.subscription_count(), subnets_per_node); + assert_eq!(enr_add_count, subnets_per_node); + assert_eq!( + unsubscribe_event_count, + attestation_subnet_count - subnets_per_node as u64 + ); assert_eq!(unexpected_msg_count, 0); // test completed successfully } - #[cfg(not(feature = "deterministic_long_lived_attnets"))] #[tokio::test] - async fn subscribe_all_random_subnets_plus_one() { + async fn subscribe_correct_number_of_subnets() { let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; let subscription_slot = 10; + let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; + // the 65th subscription should result in no more messages than the previous scenario let subscription_count = attestation_subnet_count + 1; let committee_count = 1; @@ -478,6 +498,7 @@ mod attestation_service { subscription_count, current_slot + subscription_slot, committee_count, + true, ); // submit the subscriptions @@ -506,12 +527,12 @@ mod attestation_service { } else { panic!("Unexpected event {:?}", bulk_discovery_event); } - // 64 `DiscoverPeer` 
requests of length 1 corresponding to random subnets + // subnets_per_node `DiscoverPeer` requests of length 1 corresponding to long-lived subnets // and 1 `DiscoverPeer` request corresponding to the bulk subnet discovery. - // For the 65th subscription, the call to `subscribe_to_random_subnets` is not made because we are at capacity. - assert_eq!(discover_peer_count, 64 + 1); - assert_eq!(attestation_service.subscription_count(), 64); - assert_eq!(enr_add_count, 64); + + assert_eq!(discover_peer_count, subnets_per_node + 1); + assert_eq!(attestation_service.subscription_count(), subnets_per_node); + assert_eq!(enr_add_count, subnets_per_node); assert_eq!(unexpected_msg_count, 0); } @@ -521,6 +542,7 @@ mod attestation_service { // subscription config let validator_index = 1; let committee_count = 1; + let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // Makes 2 validator subscriptions to the same subnet but at different slots. // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). 
@@ -542,6 +564,7 @@ mod attestation_service { com1, current_slot + Slot::new(subscription_slot1), committee_count, + true, ); let sub2 = get_subscription( @@ -549,6 +572,7 @@ mod attestation_service { com2, current_slot + Slot::new(subscription_slot2), committee_count, + true, ); let subnet_id1 = SubnetId::compute_subnet::( @@ -596,11 +620,10 @@ mod attestation_service { &subnet_id1, attestation_subnets::SubscriptionKind::LongLived, ) { - assert_eq!(expected_subscription, events[3]); - // fourth is a discovery event - assert_eq!(expected_unsubscription, events[5]); + assert_eq!(expected_subscription, events[subnets_per_node * 3]); + assert_eq!(expected_unsubscription, events[subnets_per_node * 3 + 2]); } - assert_eq!(attestation_service.subscription_count(), 1); + assert_eq!(attestation_service.subscription_count(), 2); println!("{events:?}"); let subscription_slot = current_slot + subscription_slot2 - 1; // one less do to the @@ -633,40 +656,44 @@ mod attestation_service { } #[tokio::test] - #[cfg(feature = "deterministic_long_lived_attnets")] async fn test_update_deterministic_long_lived_subnets() { let mut attestation_service = get_attestation_service(None); - let new_subnet = SubnetId::new(1); - let maintained_subnet = SubnetId::new(2); - let removed_subnet = SubnetId::new(3); + let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = get_subscriptions(20, current_slot, 30, false); + + // submit the subscriptions attestation_service - .set_long_lived_subscriptions(HashSet::from([removed_subnet, maintained_subnet])); - // clear initial events - let _events = get_events(&mut attestation_service, None, 1).await; + .validator_subscriptions(subscriptions) + .unwrap(); - attestation_service - .update_long_lived_subnets_testing(HashSet::from([maintained_subnet, new_subnet])); - - let events = 
get_events(&mut attestation_service, None, 1).await; - let new_subnet = Subnet::Attestation(new_subnet); - let removed_subnet = Subnet::Attestation(removed_subnet); + // There should only be the same subscriptions as there are in the specification, + // regardless of subscriptions assert_eq!( - events, + attestation_service.long_lived_subscriptions().len(), + subnets_per_node + ); + + let events = get_events(&mut attestation_service, None, 4).await; + + // Check that we attempt to subscribe and register ENRs + matches::assert_matches!( + events[..6], [ - // events for the new subnet - SubnetServiceMessage::Subscribe(new_subnet), - SubnetServiceMessage::EnrAdd(new_subnet), - SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { - subnet: new_subnet, - min_ttl: None - }]), - // events for the removed subnet - SubnetServiceMessage::Unsubscribe(removed_subnet), - SubnetServiceMessage::EnrRemove(removed_subnet), + SubnetServiceMessage::Subscribe(_), + SubnetServiceMessage::EnrAdd(_), + SubnetServiceMessage::DiscoverPeers(_), + SubnetServiceMessage::Subscribe(_), + SubnetServiceMessage::EnrAdd(_), + SubnetServiceMessage::DiscoverPeers(_), ] ); - println!("{events:?}") } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 256a2b4297..62ca68e7bc 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -156,9 +156,7 @@ impl SingleBlockRequest { cannot_process: self.failed_processing >= self.failed_downloading, }) } else if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { - let request = BlocksByRootRequest { - block_roots: VariableList::from(vec![self.hash]), - }; + let request = BlocksByRootRequest::new(VariableList::from(vec![self.hash])); self.state = State::Downloading { peer_id }; self.used_peers.insert(peer_id); Ok((peer_id, 
request)) diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 5a70944f6c..82334db0f8 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use crate::beacon_processor::BeaconProcessorSend; use crate::service::RequestId; use crate::sync::manager::RequestId as SyncId; use crate::NetworkMessage; @@ -54,7 +55,7 @@ impl TestRig { SyncNetworkContext::new( network_tx, globals, - beacon_processor_tx, + BeaconProcessorSend(beacon_processor_tx), log.new(slog::o!("component" => "network_context")), ) }; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 230c883a93..c24d4c192b 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,7 +38,7 @@ use super::block_lookups::BlockLookups; use super::network_context::SyncNetworkContext; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; -use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; +use crate::beacon_processor::{BeaconProcessorSend, ChainSegmentProcessId}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; @@ -188,7 +188,7 @@ pub fn spawn( beacon_chain: Arc>, network_globals: Arc>, network_send: mpsc::UnboundedSender>, - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: BeaconProcessorSend, log: slog::Logger, ) -> mpsc::UnboundedSender> { assert!( @@ -556,7 +556,7 @@ impl SyncManager { .parent_block_processed(chain_hash, result, &mut self.network), }, SyncMessage::BatchProcessed { sync_type, result } => match sync_type { - ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch) 
=> { self.range_sync.handle_block_process_result( &mut self.network, chain_id, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index c81fed2443..03c466eece 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -3,7 +3,7 @@ use super::manager::{Id, RequestId as SyncRequestId}; use super::range_sync::{BatchId, ChainId}; -use crate::beacon_processor::WorkEvent; +use crate::beacon_processor::BeaconProcessorSend; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; use beacon_chain::{BeaconChainTypes, EngineState}; @@ -37,7 +37,7 @@ pub struct SyncNetworkContext { execution_engine_state: EngineState, /// Channel to send work to the beacon processor. - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: BeaconProcessorSend, /// Logger for the `SyncNetworkContext`. log: slog::Logger, @@ -47,7 +47,7 @@ impl SyncNetworkContext { pub fn new( network_send: mpsc::UnboundedSender>, network_globals: Arc>, - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: BeaconProcessorSend, log: slog::Logger, ) -> Self { Self { @@ -112,7 +112,7 @@ impl SyncNetworkContext { self.log, "Sending BlocksByRange Request"; "method" => "BlocksByRange", - "count" => request.count, + "count" => request.count(), "peer" => %peer_id, ); let request = Request::BlocksByRange(request); @@ -138,7 +138,7 @@ impl SyncNetworkContext { self.log, "Sending backfill BlocksByRange Request"; "method" => "BlocksByRange", - "count" => request.count, + "count" => request.count(), "peer" => %peer_id, ); let request = Request::BlocksByRange(request); @@ -185,7 +185,7 @@ impl SyncNetworkContext { self.log, "Sending BlocksByRoot Request"; "method" => "BlocksByRoot", - "count" => request.block_roots.len(), + "count" => request.block_roots().len(), "peer" => %peer_id ); let request = Request::BlocksByRoot(request); @@ -209,7 +209,7 @@ impl 
SyncNetworkContext { self.log, "Sending BlocksByRoot Request"; "method" => "BlocksByRoot", - "count" => request.block_roots.len(), + "count" => request.block_roots().len(), "peer" => %peer_id ); let request = Request::BlocksByRoot(request); @@ -278,12 +278,12 @@ impl SyncNetworkContext { }) } - pub fn processor_channel_if_enabled(&self) -> Option<&mpsc::Sender>> { + pub fn processor_channel_if_enabled(&self) -> Option<&BeaconProcessorSend> { self.is_execution_engine_online() .then_some(&self.beacon_processor_send) } - pub fn processor_channel(&self) -> &mpsc::Sender> { + pub fn processor_channel(&self) -> &BeaconProcessorSend { &self.beacon_processor_send } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 3eee7223db..723ea9b59d 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -202,10 +202,10 @@ impl BatchInfo { /// Returns a BlocksByRange request associated with the batch. 
pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest { - BlocksByRangeRequest { - start_slot: self.start_slot.into(), - count: self.end_slot.sub(self.start_slot).into(), - } + BlocksByRangeRequest::new( + self.start_slot.into(), + self.end_slot.sub(self.start_slot).into(), + ) } /// After different operations over a batch, this could be in a state that allows it to diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 4226b600f5..51ca9e2b07 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -3,7 +3,7 @@ use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEven use crate::sync::{ manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, }; -use beacon_chain::{BeaconChainTypes, CountUnrealized}; +use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use rand::seq::SliceRandom; @@ -101,8 +101,6 @@ pub struct SyncingChain { /// Batches validated by this chain. validated_batches: u64, - is_finalized_segment: bool, - /// The chain's log. log: slog::Logger, } @@ -128,7 +126,6 @@ impl SyncingChain { target_head_slot: Slot, target_head_root: Hash256, peer_id: PeerId, - is_finalized_segment: bool, log: &slog::Logger, ) -> Self { let mut peers = FnvHashMap::default(); @@ -150,7 +147,6 @@ impl SyncingChain { state: ChainSyncingState::Stopped, current_processing_batch: None, validated_batches: 0, - is_finalized_segment, log: log.new(o!("chain" => id)), } } @@ -318,12 +314,7 @@ impl SyncingChain { // for removing chains and checking completion is in the callback. 
let blocks = batch.start_processing()?; - let count_unrealized = if self.is_finalized_segment { - CountUnrealized::False - } else { - CountUnrealized::True - }; - let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized); + let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id); self.current_processing_batch = Some(batch_id); if let Err(e) = diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 37a3f13e73..65ddcefe85 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -465,10 +465,10 @@ impl ChainCollection { network: &mut SyncNetworkContext, ) { let id = SyncingChain::::id(&target_head_root, &target_head_slot); - let (collection, is_finalized) = if let RangeSyncType::Finalized = sync_type { - (&mut self.finalized_chains, true) + let collection = if let RangeSyncType::Finalized = sync_type { + &mut self.finalized_chains } else { - (&mut self.head_chains, false) + &mut self.head_chains }; match collection.entry(id) { Entry::Occupied(mut entry) => { @@ -493,7 +493,6 @@ impl ChainCollection { target_head_slot, target_head_root, peer, - is_finalized, &self.log, ); debug_assert_eq!(new_chain.get_id(), id); diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 0f1c00e509..2c35c57d9e 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -375,7 +375,7 @@ mod tests { use crate::NetworkMessage; use super::*; - use crate::beacon_processor::WorkEvent as BeaconWorkEvent; + use crate::beacon_processor::{BeaconProcessorSend, WorkEvent as BeaconWorkEvent}; use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::parking_lot::RwLock; @@ -603,7 +603,7 @@ mod tests { let cx = 
SyncNetworkContext::new( network_tx, globals.clone(), - beacon_processor_tx, + BeaconProcessorSend(beacon_processor_tx), log.new(o!("component" => "network_context")), ); let test_rig = TestRig { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 633cbf0438..646356b6cb 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,5 +1,6 @@ use clap::{App, Arg}; use strum::VariantNames; +use types::ProgressiveBalancesMode; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new("beacon_node") @@ -116,7 +117,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("PORT") .help("The UDP port that discovery will listen on over IpV6 if listening over \ both Ipv4 and IpV6. Defaults to `port6`") - .hidden(true) // TODO: implement dual stack via two sockets in discv5. .takes_value(true), ) .arg( @@ -198,7 +198,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { discovery. Set this only if you are sure other nodes can connect to your \ local node on this address. This will update the `ip4` or `ip6` ENR fields \ accordingly. To update both, set this flag twice with the different values.") - .requires("enr-udp-port") .multiple(true) .max_values(2) .takes_value(true), @@ -282,7 +281,23 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { for a beacon node being referenced by validator client using the --proposer-node flag. This configuration is for enabling more secure setups.") .takes_value(false), ) - + .arg( + Arg::with_name("inbound-rate-limiter") + .long("inbound-rate-limiter") + .help( + "Configures the inbound rate limiter (requests received by this node).\ + \ + Rate limit quotas per protocol can be set in the form of \ + :/. To set quotas for multiple protocols, \ + separate them by ';'. If the inbound rate limiter is enabled and a protocol is not \ + present in the configuration, the default quotas will be used. \ + \ + This is enabled by default, using default quotas. To disable rate limiting pass \ + `disabled` to this option instead." 
+ ) + .takes_value(true) + .hidden(true) + ) .arg( Arg::with_name("disable-backfill-rate-limiting") .long("disable-backfill-rate-limiting") @@ -671,7 +686,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("max-skip-slots") .long("max-skip-slots") .help( - "Refuse to skip more than this many slots when processing a block or attestation. \ + "Refuse to skip more than this many slots when processing an attestation. \ This prevents nodes on minority forks from wasting our time and disk space, \ but could also cause unnecessary consensus failures, so is disabled by default." ) @@ -776,8 +791,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("slasher-broadcast") .long("slasher-broadcast") .help("Broadcast slashings found by the slasher to the rest of the network \ - [disabled by default].") - .requires("slasher") + [Enabled by default].") + .takes_value(true) + .default_value("true") ) .arg( Arg::with_name("slasher-backend") @@ -1081,7 +1097,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("gui") .hidden(true) .help("Enable the graphical user interface and all its requirements. \ - This is equivalent to --http and --validator-monitor-auto.") + This enables --http and --validator-monitor-auto and enables SSE logging.") .takes_value(false) ) .arg( @@ -1093,4 +1109,26 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { // always using the builder. .conflicts_with("builder-profit-threshold") ) + .arg( + Arg::with_name("invalid-gossip-verified-blocks-path") + .long("invalid-gossip-verified-blocks-path") + .value_name("PATH") + .help("If a block succeeds gossip validation whilst failing full validation, store \ + the block SSZ as a file at this path. This feature is only recommended for \ + developers. 
This directory is not pruned, users should be careful to avoid \ + filling up their disks.") + ) + .arg( + Arg::with_name("progressive-balances") + .long("progressive-balances") + .value_name("MODE") + .help("Options to enable or disable the progressive balances cache for \ + unrealized FFG progression calculation. The default `checked` mode compares \ + the progressive balances from the cache against results from the existing \ + method. If there is a mismatch, it falls back to the existing method. The \ + optimized mode (`fast`) is faster but is still experimental, and is \ + not recommended for mainnet usage at this time.") + .takes_value(true) + .possible_values(ProgressiveBalancesMode::VARIANTS) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f05fea2db1..948c70dd41 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -633,7 +633,9 @@ pub fn get_config( slasher_config.validator_chunk_size = validator_chunk_size; } - slasher_config.broadcast = cli_args.is_present("slasher-broadcast"); + if let Some(broadcast) = clap_utils::parse_optional(cli_args, "slasher-broadcast")? { + slasher_config.broadcast = broadcast; + } if let Some(backend) = clap_utils::parse_optional(cli_args, "slasher-backend")? { slasher_config.backend = backend; @@ -793,6 +795,17 @@ pub fn get_config( client_config.chain.enable_backfill_rate_limiting = !cli_args.is_present("disable-backfill-rate-limiting"); + if let Some(path) = clap_utils::parse_optional(cli_args, "invalid-gossip-verified-blocks-path")? + { + client_config.network.invalid_block_storage = Some(path); + } + + if let Some(progressive_balances_mode) = + clap_utils::parse_optional(cli_args, "progressive-balances")? + { + client_config.chain.progressive_balances_mode = progressive_balances_mode; + } + Ok(client_config) } @@ -1227,6 +1240,7 @@ pub fn set_network_config( // Light client server config. 
config.enable_light_client_server = cli_args.is_present("light-client-server"); + // The self limiter is disabled by default. // This flag can be used both with or without a value. Try to parse it first with a value, if // no value is defined but the flag is present, use the default params. config.outbound_rate_limiter_config = clap_utils::parse_optional(cli_args, "self-limiter")?; @@ -1247,7 +1261,22 @@ pub fn set_network_config( config.proposer_only = true; warn!(log, "Proposer-only mode enabled"; "info"=> "Do not connect a validator client to this node unless via the --proposer-nodes flag"); } - + // The inbound rate limiter is enabled by default unless `disabled` is passed to the + // `inbound-rate-limiter` flag. Any other value should be parsed as a configuration string. + config.inbound_rate_limiter_config = match cli_args.value_of("inbound-rate-limiter") { + None => { + // Enabled by default, with default values + Some(Default::default()) + } + Some("disabled") => { + // Explicitly disabled + None + } + Some(config_str) => { + // Enabled with a custom configuration + Some(config_str.parse()?) 
+ } + }; Ok(()) } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 650763dcaf..47694825ca 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -16,7 +16,7 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; pub use config::{get_config, get_data_dir, get_slots_per_restore_point, set_network_config}; use environment::RuntimeContext; pub use eth2_config::Eth2Config; -use slasher::Slasher; +use slasher::{DatabaseBackendOverride, Slasher}; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; use std::sync::Arc; @@ -86,7 +86,27 @@ impl ProductionBeaconNode { .http_api_config(client_config.http_api.clone()) .disk_store(&db_path, &freezer_db_path, store_config, log.clone())?; - let builder = if let Some(slasher_config) = client_config.slasher.clone() { + let builder = if let Some(mut slasher_config) = client_config.slasher.clone() { + match slasher_config.override_backend() { + DatabaseBackendOverride::Success(old_backend) => { + info!( + log, + "Slasher backend overriden"; + "reason" => "database exists", + "configured_backend" => %old_backend, + "override_backend" => %slasher_config.backend, + ); + } + DatabaseBackendOverride::Failure(path) => { + warn!( + log, + "Slasher backend override failed"; + "advice" => "delete old MDBX database or enable MDBX backend", + "path" => path.display() + ); + } + _ => {} + } let slasher = Arc::new( Slasher::open(slasher_config, log.new(slog::o!("service" => "slasher"))) .map_err(|e| format!("Slasher open error: {:?}", e))?, diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index a1c65bd26d..a952f1b2ff 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -10,7 +10,7 @@ beacon_chain = {path = "../beacon_chain"} [dependencies] db-key = "0.0.5" -leveldb = { version = "0.8.6", default-features = false } +leveldb = { version = "0.8.6" } parking_lot = "0.12.0" itertools = "0.10.0" ethereum_ssz = "0.5.0" diff --git 
a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index cd923da40d..9f2532d0a7 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -373,6 +373,7 @@ macro_rules! impl_try_into_beacon_state { // Caching total_active_balance: <_>::default(), + progressive_balances_cache: <_>::default(), committee_caches: <_>::default(), pubkey_cache: <_>::default(), exit_cache: <_>::default(), diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index cd50babdb0..bac5d3cc82 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -63,7 +63,7 @@ where .load_cold_state_by_slot(lower_limit_slot)? .ok_or(HotColdDBError::MissingLowerLimitState(lower_limit_slot))?; - state.build_all_caches(&self.spec)?; + state.build_caches(&self.spec)?; process_results(block_root_iter, |iter| -> Result<(), Error> { let mut io_batch = vec![]; diff --git a/book/src/LaTeX/full-withdrawal.tex b/book/src/LaTeX/full-withdrawal.tex index 2447ba0974..a4b384872b 100644 --- a/book/src/LaTeX/full-withdrawal.tex +++ b/book/src/LaTeX/full-withdrawal.tex @@ -37,7 +37,7 @@ \rput[bl](9.0,-3.49){27.3 hours} \rput[bl](8.8,-5.49){Varying time} \rput[bl](8.7,-5.99){validator sweep} - \rput[bl](8.9,-6.59){up to 5 days} + \rput[bl](8.9,-6.59){up to \textit{n} days} \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.6,-2.19)(8.0,-3.89) \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-4.79)(7.9,-6.89) \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-2.49)(0.0,-4.29) diff --git a/book/src/LaTeX/partial-withdrawal.tex b/book/src/LaTeX/partial-withdrawal.tex index 05db3b6888..4d1d0b5f0a 100644 --- a/book/src/LaTeX/partial-withdrawal.tex +++ b/book/src/LaTeX/partial-withdrawal.tex @@ -31,7 +31,7 @@ \rput[bl](0.9,-1.59){Beacon chain} \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-3.29)(6.8,-5.09) 
\rput[bl](7.6,-3.99){validator sweep} - \rput[bl](7.5,-4.69){$\sim$ every 5 days} + \rput[bl](7.82,-4.73){every \textit{n} days} \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-3.29)(0.0,-5.09) \rput[bl](1.3,-4.09){BLS to} \rput[bl](0.5,-4.69){execution change} diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index bfd5a02a6f..7431d22387 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -18,10 +18,11 @@ * [Validator Monitoring](./validator-monitoring.md) * [Doppelganger Protection](./validator-doppelganger.md) * [Suggested Fee Recipient](./suggested-fee-recipient.md) + * [Validator Graffiti](./graffiti.md) * [APIs](./api.md) * [Beacon Node API](./api-bn.md) - * [/lighthouse](./api-lighthouse.md) - * [Validator Inclusion APIs](./validator-inclusion.md) + * [Lighthouse API](./api-lighthouse.md) + * [Validator Inclusion APIs](./validator-inclusion.md) * [Validator Client API](./api-vc.md) * [Endpoints](./api-vc-endpoints.md) * [Authorization Header](./api-vc-auth-header.md) @@ -36,7 +37,6 @@ * [Advanced Usage](./advanced.md) * [Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) - * [Validator Graffiti](./graffiti.md) * [Proposer Only Beacon Nodes](./advanced-proposer-only.md) * [Remote Signing with Web3Signer](./validator-web3signer.md) * [Database Configuration](./advanced_database.md) @@ -46,9 +46,8 @@ * [Advanced Networking](./advanced_networking.md) * [Running a Slasher](./slasher.md) * [Redundancy](./redundancy.md) - * [Pre-Releases](./advanced-pre-releases.md) * [Release Candidates](./advanced-release-candidates.md) - * [MEV and Lighthouse](./builders.md) + * [MEV](./builders.md) * [Merge Migration](./merge-migration.md) * [Late Block Re-orgs](./late-block-re-orgs.md) * [Contributing](./contributing.md) diff --git a/book/src/advanced-pre-releases.md b/book/src/advanced-pre-releases.md deleted file mode 100644 index f3f4a52304..0000000000 --- a/book/src/advanced-pre-releases.md +++ /dev/null @@ 
-1,4 +0,0 @@ -# Pre-Releases - -Pre-releases are now referred to as [Release Candidates](./advanced-release-candidates.md). The terms may -be used interchangeably. diff --git a/book/src/advanced-release-candidates.md b/book/src/advanced-release-candidates.md index 842bc48404..b2ff021365 100644 --- a/book/src/advanced-release-candidates.md +++ b/book/src/advanced-release-candidates.md @@ -7,7 +7,7 @@ [`v1.4.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0 From time-to-time, Lighthouse *release candidates* will be published on the [sigp/lighthouse] -repository. These releases have passed the usual automated testing, however the developers would +repository. Release candidates are previously known as Pre-Releases. These releases have passed the usual automated testing, however the developers would like to see it running "in the wild" in a variety of configurations before declaring it an official, stable release. Release candidates are also used by developers to get feedback from users regarding the ergonomics of new features or changes. @@ -36,8 +36,9 @@ Users may wish to try a release candidate for the following reasons: - To help detect bugs and regressions before they reach production. - To provide feedback on annoyances before they make it into a release and become harder to change or revert. +There can also be a scenario that a bug has been found and requires an urgent fix. An example of incidence is [v4.0.2-rc.0](https://github.com/sigp/lighthouse/releases/tag/v4.0.2-rc.0) which contains a hot-fix to address high CPU usage experienced after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023. In this scenario, we will announce the release candidate on [Github](https://github.com/sigp/lighthouse/releases) and also on [Discord](https://discord.gg/cyAszAh) to recommend users to update to the release candidate version. 
+ ## When *not* to use a release candidate -It is not recommended to use release candidates for any critical tasks on mainnet (e.g., staking). -To test critical features, try one of the testnets (e.g., Prater). +Other than the above scenarios, it is generally not recommended to use release candidates for any critical tasks on mainnet (e.g., staking). To test new release candidate features, try one of the testnets (e.g., Goerli). diff --git a/book/src/advanced.md b/book/src/advanced.md index d46cae6990..51416a3b73 100644 --- a/book/src/advanced.md +++ b/book/src/advanced.md @@ -6,4 +6,18 @@ elsewhere? This section provides detailed information about configuring Lighthouse for specific use cases, and tips about how things work under the hood. -* [Advanced Database Configuration](./advanced_database.md): understanding space-time trade-offs in the database. +* [Checkpoint Sync](./checkpoint-sync.md): quickly sync the beacon chain to perform validator duties. +* [Custom Data Directories](./advanced-datadir.md): modify the data directory to your preferred location. +* [Proposer Only Beacon Nodes](./advanced-proposer-only.md): beacon node only for proposer duty for increased anonymity. +* [Remote Signing with Web3Signer](./validator-web3signer.md): don't want to store your keystore in local node? Use web3signer. +* [Database Configuration](./advanced_database.md): understanding space-time trade-offs in the database. +* [Database Migrations](./database-migrations.md): have a look at all previous Lighthouse database scheme versions. +* [Key Management](./key-management.md): explore how to generate wallet with Lighthouse. +* [Key Recovery](./key-recovery.md): explore how to recover wallet and validator with Lighthouse. +* [Advanced Networking](./advanced_networking.md): open your ports to have a diverse and healthy set of peers. +* [Running a Slasher](./slasher.md): contribute to the health of the network by running a slasher. 
+* [Redundancy](./redundancy.md): want to have more than one beacon node as backup? This is for you. +* [Release Candidates](./advanced-release-candidates.md): latest release of Lighthouse to get feedback from users. +* [Maximal Extractable Value](./builders.md): use external builders for a potential higher rewards during block proposals +* [Merge Migration](./merge-migration.md): look at what you need to do during a significant network upgrade: The Merge +* [Late Block Re-orgs](./late-block-re-orgs.md): read information about Lighthouse late block re-orgs. diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 57e49531ca..d951104054 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -23,13 +23,17 @@ states to slow down dramatically. A lower _slots per restore point_ value (SPRP) frequent restore points, while a higher SPRP corresponds to less frequent. The table below shows some example values. -| Use Case | SPRP | Yearly Disk Usage | Load Historical State | -|--------------------------|------|-------------------|-----------------------| -| Block explorer/analysis | 32 | 1.4 TB | 155 ms | -| Hobbyist (prev. default) | 2048 | 23.1 GB | 10.2 s | -| Validator only (default) | 8192 | 5.7 GB | 41 s | +| Use Case | SPRP | Yearly Disk Usage* | Load Historical State | +|----------------------------|------|-------------------|-----------------------| +| Research | 32 | 3.4 TB | 155 ms | +| Block explorer/analysis | 128 | 851 GB | 620 ms | +| Enthusiast (prev. default) | 2048 | 53.6 GB | 10.2 s | +| Hobbyist | 4096 | 26.8 GB | 20.5 s | +| Validator only (default) | 8192 | 8.1 GB | 41 s | -As you can see, it's a high-stakes trade-off! The relationships to disk usage and historical state +*Last update: May 2023. + +As we can see, it's a high-stakes trade-off! The relationships to disk usage and historical state load time are both linear – doubling SPRP halves disk usage and doubles load time. 
The minimum SPRP is 32, and the maximum is 8192. @@ -38,9 +42,11 @@ The default value is 8192 for databases synced from scratch using Lighthouse v2. The values shown in the table are approximate, calculated using a simple heuristic: each `BeaconState` consumes around 18MB of disk space, and each block replayed takes around 5ms. The -**Yearly Disk Usage** column shows the approx size of the freezer DB _alone_ (hot DB not included), -and the **Load Historical State** time is the worst-case load time for a state in the last slot -before a restore point. +**Yearly Disk Usage** column shows the approximate size of the freezer DB _alone_ (hot DB not included), calculated proportionally using the total freezer database disk usage. +The **Load Historical State** time is the worst-case load time for a state in the last slot +before a restore point. + +As an example, we use an SPRP of 4096 to calculate the total size of the freezer database until May 2023. It has been about 900 days since the genesis, the total disk usage by the freezer database is therefore: 900/365*26.8 GB = 66 GB. ### Defaults @@ -68,6 +74,8 @@ The historical state cache size can be specified with the flag `--historic-state lighthouse beacon_node --historic-state-cache-size 4 ``` +> Note: This feature will cause high memory usage. + ## Glossary * _Freezer DB_: part of the database storing finalized states. States are stored in a sparser diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 08d276ba35..586503cb96 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -38,7 +38,6 @@ large peer count will not speed up sync. For these reasons, we recommend users do not modify the `--target-peers` count drastically and use the (recommended) default. - ### NAT Traversal (Port Forwarding) Lighthouse, by default, uses port 9000 for both TCP and UDP. 
Lighthouse will @@ -51,11 +50,11 @@ peers for your node and overall improve the Ethereum consensus network. Lighthouse currently supports UPnP. If UPnP is enabled on your router, Lighthouse will automatically establish the port mappings for you (the beacon node will inform you of established routes in this case). If UPnP is not -enabled, we recommend you manually set up port mappings to both of Lighthouse's +enabled, we recommend you to manually set up port mappings to both of Lighthouse's TCP and UDP ports (9000 by default). > Note: Lighthouse needs to advertise its publicly accessible ports in -> order to inform its peers that it is contactable and how to connect to it. +> order to inform its peers that it is contactable and how to connect to it. > Lighthouse has an automated way of doing this for the UDP port. This means > Lighthouse can detect its external UDP port. There is no such mechanism for the > TCP port. As such, we assume that the external UDP and external TCP port is the @@ -63,6 +62,28 @@ TCP and UDP ports (9000 by default). > explicitly specify them using the `--enr-tcp-port` and `--enr-udp-port` as > explained in the following section. +### How to Open Ports + +The steps to do port forwarding depends on the router, but the general steps are given below: +1. Determine the default gateway IP: +- On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. +- On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. +- On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. + + The default gateway IP usually looks like 192.168.X.X. Once you obtain the IP, enter it to a web browser and it will lead you to the router management page. + +2. 
Login to the router management page. The login credentials are usually available in the manual or the router, or it can be found on a sticker underneath the router. You can also try the login credentials for some common router brands listed [here](https://www.noip.com/support/knowledgebase/general-port-forwarding-guide/). + +3. Navigate to the port forward settings in your router. The exact step depends on the router, but typically it will fall under the "Advanced" section, under the name "port forwarding" or "virtual server". + +4. Configure a port forwarding rule as below: +- Protocol: select `TCP/UDP` or `BOTH` +- External port: `9000` +- Internal port: `9000` +- IP address: Usually there is a dropdown list for you to select the device. Choose the device that is running Lighthouse + +5. To check that you have successfully open the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. + ### ENR Configuration @@ -81,7 +102,82 @@ and if it is, it will update your ENR to the correct public IP and port address (meaning you do not need to set it manually). Lighthouse persists its ENR, so on reboot it will re-load the settings it had discovered previously. -Modifying the ENR settings can degrade the discovery of your node making it +Modifying the ENR settings can degrade the discovery of your node, making it harder for peers to find you or potentially making it harder for other peers to find each other. We recommend not touching these settings unless for a more advanced use case. 
+ + +### IPv6 support + +As noted in the previous sections, two fundamental parts to ensure good +connectivity are: The parameters that configure the sockets over which +Lighthouse listens for connections, and the parameters used to tell other peers +how to connect to your node. This distinction is relevant and applies to most +nodes that do not run directly on a public network. + +#### Configuring Lighthouse to listen over IPv4/IPv6/Dual stack + +To listen over only IPv6 use the same parameters as done when listening over +IPv4 only: + +- `--listen-addresses :: --port 9909` will listen over IPv6 using port `9909` for +TCP and UDP. +- `--listen-addresses :: --port 9909 --discovery-port 9999` will listen over + IPv6 using port `9909` for TCP and port `9999` for UDP. + +To listen over both IPv4 and IPv6: +- Set two listening addresses using the `--listen-addresses` flag twice ensuring + the two addresses are one IPv4, and the other IPv6. When doing so, the + `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note + that this behaviour differs from the Ipv6 only case described above. +- If necessary, set the `--port6` flag to configure the port used for TCP and + UDP over IPv6. This flag has no effect when listening over IPv6 only. +- If necessary, set the `--discovery-port6` flag to configure the IPv6 UDP + port. This will default to the value given to `--port6` if not set. This flag + has no effect when listening over IPv6 only. + +##### Configuration Examples + +- `--listen-addresses :: --listen-addresses 0.0.0.0 --port 9909` will listen + over IPv4 using port `9909` for TCP and UDP. It will also listen over IPv6 but + using the default value for `--port6` for UDP and TCP (`9090`). +- `--listen-addresses :: --listen-addresses --port 9909 --discovery-port6 9999` + will have the same configuration as before except for the IPv6 UDP socket, + which will use port `9999`. 
+ +#### Configuring Lighthouse to advertise IPv6 reachable addresses +Lighthouse supports IPv6 to connect to other nodes both over IPv6 exclusively, +and dual stack using one socket for IPv6 and another socket for IPv6. In both +scenarios, the previous sections still apply. In summary: + +> Beacon nodes must advertise their publicly reachable socket address + +In order to do so, lighthouse provides the following CLI options/parameters. + +- `--enr-udp-port` Use this to advertise the port that is publicly reachable + over UDP with a publicly reachable IPv4 address. This might differ from the + IPv4 port used to listen. +- `--enr-udp6-port` Use this to advertise the port that is publicly reachable + over UDP with a publicly reachable IPv6 address. This might differ from the + IPv6 port used to listen. +- `--enr-tcp-port` Use this to advertise the port that is publicly reachable + over TCP with a publicly reachable IPv4 address. This might differ from the + IPv4 port used to listen. +- `--enr-tcp6-port` Use this to advertise the port that is publicly reachable + over TCP with a publicly reachable IPv6 address. This might differ from the + IPv6 port used to listen. +- `--enr-addresses` Use this to advertise publicly reachable addresses. Takes at + most two values, one for IPv4 and one for IPv6. Note that a beacon node that + advertises some address, must be + reachable both over UDP and TCP. + +In the general case, an user will not require to set these explicitly. Update +these options only if you can guarantee your node is reachable with these +values. + +#### Known caveats + +IPv6 link local addresses are likely to have poor connectivity if used in +topologies with more than one interface. Use global addresses for the general +case. diff --git a/book/src/api-bn.md b/book/src/api-bn.md index b86e593bf1..11a006493a 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -5,7 +5,7 @@ specification][OpenAPI]. 
Please follow that link for a full description of each ## Starting the server -A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `127.0.0.1:5052`. +A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag. The default listen address is `http://127.0.0.1:5052`. The following CLI flags control the HTTP server: @@ -55,11 +55,8 @@ Additional risks to be aware of include: ## CLI Example -Start the beacon node with the HTTP server listening on [http://localhost:5052](http://localhost:5052): +Start a beacon node and an execution node according to [Run a node](./run_a_node.md). Note that since [The Merge](https://ethereum.org/en/roadmap/merge/), an execution client is required to be running along with a beacon node. Hence, the query on Beacon Node APIs requires users to run both. While there are some Beacon Node APIs that you can query with only the beacon node, such as the [node version](https://ethereum.github.io/beacon-APIs/#/Node/getNodeVersion), in general an execution client is required to get the updated information about the beacon chain, such as [state root](https://ethereum.github.io/beacon-APIs/#/Beacon/getStateRoot), [headers](https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockHeaders) and many others, which are dynamically progressing with time. 
-```bash -lighthouse bn --http -``` ## HTTP Request/Response Examples @@ -77,40 +74,46 @@ curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: appl ```json { + "execution_optimistic": false, + "finalized": false, "data": { - "root": "0x4381454174fc28c7095077e959dcab407ae5717b5dca447e74c340c1b743d7b2", + "root": "0x9059bbed6b8891e0ba2f656dbff93fc40f8c7b2b7af8fea9df83cfce5ee5e3d8", "canonical": true, "header": { "message": { - "slot": "3199", - "proposer_index": "19077", - "parent_root": "0xf1934973041c5896d0d608e52847c3cd9a5f809c59c64e76f6020e3d7cd0c7cd", - "state_root": "0xe8e468f9f5961655dde91968f66480868dab8d4147de9498111df2b7e4e6fe60", - "body_root": "0x6f183abc6c4e97f832900b00d4e08d4373bfdc819055d76b0f4ff850f559b883" + "slot": "6271829", + "proposer_index": "114398", + "parent_root": "0x1d2b4fa8247f754a7a86d36e1d0283a5e425491c431533716764880a7611d225", + "state_root": "0x2b48adea290712f56b517658dde2da5d36ee01c41aebe7af62b7873b366de245", + "body_root": "0x6fa74c995ce6f397fa293666cde054d6a9741f7ec280c640bee51220b4641e2d" }, - "signature": "0x988064a2f9cf13fe3aae051a3d85f6a4bca5a8ff6196f2f504e32f1203b549d5f86a39c6509f7113678880701b1881b50925a0417c1c88a750c8da7cd302dda5aabae4b941e3104d0cf19f5043c4f22a7d75d0d50dad5dbdaf6991381dc159ab" + "signature": "0x8258e64fea426033676a0045c50543978bf173114ba94822b12188e23cbc8d8e89e0b5c628a881bf3075d325bc11341105a4e3f9332ac031d89a93b422525b79e99325928a5262f17dfa6cc3ddf84ca2466fcad86a3c168af0d045f79ef52036" } } } ``` +The `jq` tool is used to format the JSON data properly. If it returns `jq: command not found`, then you can install `jq` with `sudo apt install -y jq`. After that, run the command again, and it should return the head state of the beacon chain. + ### View the status of a validator Shows the status of validator at index `1` at the `head` state. 
```bash -curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" | jq +curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" ``` ```json { + "execution_optimistic": false, + "finalized": false, "data": { "index": "1", - "balance": "63985937939", - "status": "Active", + "balance": "32004587169", + "status": "active_ongoing", "validator": { - "pubkey": "0x873e73ee8b3e4fcf1d2fb0f1036ba996ac9910b5b348f6438b5f8ef50857d4da9075d0218a9d1b99a9eae235a39703e1", - "withdrawal_credentials": "0x00b8cdcf79ba7e74300a07e9d8f8121dd0d8dd11dcfd6d3f2807c45b426ac968", + "pubkey": "0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c", + "withdrawal_credentials": "0x01000000000000000000000015f4b914a0ccd14333d850ff311d6dafbfbaa32b", "effective_balance": "32000000000", "slashed": false, "activation_eligibility_epoch": "0", @@ -121,6 +124,7 @@ curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H " } } ``` +You can replace `1` in the above command with the validator index that you would like to query. Other API query can be done similarly by changing the link according to the Beacon API. ## Serving the HTTP API over TLS > **Warning**: This feature is currently experimental. @@ -147,9 +151,18 @@ openssl req -x509 -nodes -newkey rsa:4096 -keyout key.pem -out cert.pem -days 36 Note that currently Lighthouse only accepts keys that are not password protected. This means we need to run with the `-nodes` flag (short for 'no DES'). -Once generated, we can run Lighthouse: +Once generated, we can run Lighthouse and an execution node according to [Run a node](./run_a_node.md). 
In addition, add the flags `--http-enable-tls --http-tls-cert cert.pem --http-tls-key key.pem` to Lighthouse. The command should look like: + ```bash -lighthouse bn --http --http-enable-tls --http-tls-cert cert.pem --http-tls-key key.pem +lighthouse bn \ + --network mainnet \ + --execution-endpoint http://localhost:8551 \ + --execution-jwt /secrets/jwt.hex \ + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ + --http \ + --http-enable-tls \ + --http-tls-cert cert.pem \ + --http-tls-key key.pem ``` Note that the user running Lighthouse must have permission to read the certificate and key. @@ -159,6 +172,7 @@ The API is now being served at `https://localhost:5052`. To test connectivity, you can run the following: ```bash curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem | jq + ``` ### Connecting a validator client In order to connect a validator client to a beacon node over TLS, the validator
You can quickly check that the HTTP endpoint is up using `curl`: ```bash -curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" | jq +curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" ``` The beacon node should respond with its version: ```json -{"data":{"version":"Lighthouse/v0.2.9-6f7b4768a/x86_64-linux"}} +{"data":{"version":"Lighthouse/v4.1.0-693886b/x86_64-linux"}} ``` If this doesn't work, the server might not be started or there might be a diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index e67a79c8f0..7626d64013 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -1,8 +1,8 @@ # Lighthouse Non-Standard APIs Lighthouse fully supports the standardization efforts at -[github.com/ethereum/beacon-APIs](https://github.com/ethereum/beacon-APIs), -however sometimes development requires additional endpoints that shouldn't +[github.com/ethereum/beacon-APIs](https://github.com/ethereum/beacon-APIs). +However, sometimes development requires additional endpoints that shouldn't necessarily be defined as a broad-reaching standard. Such endpoints are placed behind the `/lighthouse` path. @@ -16,10 +16,12 @@ Although we don't recommend that users rely on these endpoints, we document them briefly so they can be utilized by developers and researchers. + + ### `/lighthouse/health` +*Note: This endpoint is presently only available on Linux.* -*Presently only available on Linux.* - +Returns information regarding the health of the host machine. ```bash curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/json" | jq ``` @@ -63,7 +65,7 @@ curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/j ``` ### `/lighthouse/ui/health` - +Returns information regarding the health of the host machine.
```bash curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: application/json" | jq @@ -83,24 +85,24 @@ curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: applicatio "global_cpu_frequency": 3.4, "disk_bytes_total": 502390845440, "disk_bytes_free": 9981386752, - "network_name": "wlp0s20f3", - "network_bytes_total_received": 14105556611, - "network_bytes_total_transmit": 3649489389, - "nat_open": true, - "connected_peers": 80, - "sync_state": "Synced", "system_uptime": 660706, "app_uptime": 105, "system_name": "Arch Linux", "kernel_version": "5.19.13-arch1-1", "os_version": "Linux rolling Arch Linux", "host_name": "Computer1" + "network_name": "wlp0s20f3", + "network_bytes_total_received": 14105556611, + "network_bytes_total_transmit": 3649489389, + "nat_open": true, + "connected_peers": 80, + "sync_state": "Synced", } } ``` ### `/lighthouse/ui/validator_count` - +Returns an overview of validators. ```bash curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: application/json" | jq ``` @@ -121,9 +123,9 @@ curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: ap } ``` + ### `/lighthouse/ui/validator_metrics` -Re-exposes certain metrics from the validator monitor to the HTTP API. -Will only return metrics for the validators currently being monitored and are present in the POST data. +Re-exposes certain metrics from the validator monitor to the HTTP API. This API requires that the beacon node to have the flag `--validator-monitor-auto`. This API will only return metrics for the validators currently being monitored and present in the POST data, or the validators running in the validator client. 
```bash curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq ``` @@ -148,24 +150,40 @@ curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indic } } ``` +Running this API without the flag `--validator-monitor-auto` in the beacon node will return null: +```json +{ + "data": { + "validators": {} + } +} +``` ### `/lighthouse/syncing` - +Returns the sync status of the beacon node. ```bash curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/json" | jq ``` -```json -{ - "data": { - "SyncingFinalized": { - "start_slot": 3104, - "head_slot": 343744, - "head_root": "0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf" +There are two possible outcomes, depending on whether the beacon node is syncing or synced. + +1. Syncing: + ```json + { + "data": { + "SyncingFinalized": { + "start_slot": "5478848", + "target_slot": "5478944" + } + } } - } -} -``` + ``` +1. 
Synced: + ```json + { + "data": "Synced" + } + ``` ### `/lighthouse/peers` @@ -173,96 +191,137 @@ curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/ curl -X GET "http://localhost:5052/lighthouse/peers" -H "accept: application/json" | jq ``` + ```json [ { - "peer_id": "16Uiu2HAmA9xa11dtNv2z5fFbgF9hER3yq35qYNTPvN7TdAmvjqqv", + "peer_id": "16Uiu2HAm2ZoWQ2zkzsMFvf5o7nXa7R5F7H1WzZn2w7biU3afhgov", "peer_info": { - "_status": "Healthy", "score": { - "score": 0 + "Real": { + "lighthouse_score": 0, + "gossipsub_score": -18371.409037358582, + "ignore_negative_gossipsub_score": false, + "score": -21.816048231863316 + } }, "client": { "kind": "Lighthouse", - "version": "v0.2.9-1c9a055c", - "os_version": "aarch64-linux", - "protocol_version": "lighthouse/libp2p", - "agent_string": "Lighthouse/v0.2.9-1c9a055c/aarch64-linux" + "version": "v4.1.0-693886b", + "os_version": "x86_64-linux", + "protocol_version": "eth2/1.0.0", + "agent_string": "Lighthouse/v4.1.0-693886b/x86_64-linux" }, "connection_status": { "status": "disconnected", "connections_in": 0, "connections_out": 0, - "last_seen": 1082, + "last_seen": 9028, "banned_ips": [] }, "listening_addresses": [ - "/ip4/80.109.35.174/tcp/9000", - "/ip4/127.0.0.1/tcp/9000", - "/ip4/192.168.0.73/tcp/9000", - "/ip4/172.17.0.1/tcp/9000", - "/ip6/::1/tcp/9000" + "/ip4/212.102.59.173/tcp/23452", + "/ip4/23.124.84.197/tcp/23452", + "/ip4/127.0.0.1/tcp/23452", + "/ip4/192.168.0.2/tcp/23452", + "/ip4/192.168.122.1/tcp/23452" + ], + "seen_addresses": [ + "23.124.84.197:23452" ], "sync_status": { - "Advanced": { + "Synced": { "info": { - "status_head_slot": 343829, - "status_head_root": "0xe34e43efc2bb462d9f364bc90e1f7f0094e74310fd172af698b5a94193498871", - "status_finalized_epoch": 10742, - "status_finalized_root": "0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf" + "head_slot": "5468141", + "head_root": "0x7acc017a199c0cf0693a19e0ed3a445a02165c03ea6f46cb5ffb8f60bf0ebf35", + "finalized_epoch": 
"170877", + "finalized_root": "0xbbc3541637976bd03b526de73e60a064e452a4b873b65f43fa91fefbba140410" } } }, "meta_data": { - "seq_number": 160, - "attnets": "0x0000000800000080" - } + "V2": { + "seq_number": 501, + "attnets": "0x0000020000000000", + "syncnets": "0x00" + } + }, + "subnets": [], + "is_trusted": false, + "connection_direction": "Outgoing", + "enr": "enr:-L64QI37ReMIki2Uqln3pcgQyAH8Y3ceSYrtJp1FlDEGSM37F7ngCpS9k-SKQ1bOHp0zFCkNxpvFlf_3o5OUkBRw0qyCAfqHYXR0bmV0c4gAAAIAAAAAAIRldGgykGKJQe8DABAg__________-CaWSCdjSCaXCEF3xUxYlzZWNwMjU2azGhAmoW921eIvf8pJhOvOwuxLSxKnpLY2inE_bUILdlZvhdiHN5bmNuZXRzAIN0Y3CCW5yDdWRwgluc" } } ] ``` ### `/lighthouse/peers/connected` - +Returns information about connected peers. ```bash curl -X GET "http://localhost:5052/lighthouse/peers/connected" -H "accept: application/json" | jq ``` + + ```json [ - { - "peer_id": "16Uiu2HAkzJC5TqDSKuLgVUsV4dWat9Hr8EjNZUb6nzFb61mrfqBv", + { + "peer_id": "16Uiu2HAmCAvpoYE6ABGdQJaW4iufVqNCTJU5AqzyZPB2D9qba7ZU", "peer_info": { - "_status": "Healthy", "score": { - "score": 0 + "Real": { + "lighthouse_score": 0, + "gossipsub_score": 0, + "ignore_negative_gossipsub_score": false, + "score": 0 + } }, "client": { "kind": "Lighthouse", - "version": "v0.2.8-87181204+", + "version": "v3.5.1-319cc61", "os_version": "x86_64-linux", - "protocol_version": "lighthouse/libp2p", - "agent_string": "Lighthouse/v0.2.8-87181204+/x86_64-linux" + "protocol_version": "eth2/1.0.0", + "agent_string": "Lighthouse/v3.5.1-319cc61/x86_64-linux" }, "connection_status": { "status": "connected", - "connections_in": 1, - "connections_out": 0, - "last_seen": 0, - "banned_ips": [] + "connections_in": 0, + "connections_out": 1, + "last_seen": 0 }, "listening_addresses": [ - "/ip4/34.204.178.218/tcp/9000", + "/ip4/144.91.92.17/tcp/9000", "/ip4/127.0.0.1/tcp/9000", - "/ip4/172.31.67.58/tcp/9000", - "/ip4/172.17.0.1/tcp/9000", - "/ip6/::1/tcp/9000" + "/ip4/172.19.0.3/tcp/9000" ], - "sync_status": "Unknown", + "seen_addresses": [ + 
"144.91.92.17:9000" + ], + "sync_status": { + "Synced": { + "info": { + "head_slot": "5468930", + "head_root": "0x25409073c65d2f6f5cee20ac2eff5ab980b576ca7053111456063f8ff8f67474", + "finalized_epoch": "170902", + "finalized_root": "0xab59473289e2f708341d8e5aafd544dd88e09d56015c90550ea8d16c50b4436f" + } + } + }, "meta_data": { - "seq_number": 1819, - "attnets": "0xffffffffffffffff" - } + "V2": { + "seq_number": 67, + "attnets": "0x0000000080000000", + "syncnets": "0x00" + } + }, + "subnets": [ + { + "Attestation": "39" + } + ], + "is_trusted": false, + "connection_direction": "Outgoing", + "enr": "enr:-Ly4QHd3RHJdkuR1iE6MtVtibC5S-aiWGPbwi4cG3wFGbqxRAkAgLDseTzPFQQIehQ7LmO7KIAZ5R1fotjMQ_LjA8n1Dh2F0dG5ldHOIAAAAAAAQAACEZXRoMpBiiUHvAwAQIP__________gmlkgnY0gmlwhJBbXBGJc2VjcDI1NmsxoQL4z8A7B-NS29zOgvkTX1YafKandwOtrqQ1XRnUJj3se4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA" } } ] @@ -297,7 +356,8 @@ health of the execution node that the beacon node is connected to. - `latest_cached_block_number` & `latest_cached_block_timestamp`: the block number and timestamp of the latest block we have in our block cache. - For correct execution client voting this timestamp should be later than the -`voting_period_start_timestamp`. +`voting_target_timestamp`. + - `voting_target_timestamp`: The latest timestamp allowed for an execution layer block in this voting period. - `eth1_node_sync_status_percentage` (float): An estimate of how far the head of the execution node is from the head of the execution chain. @@ -420,11 +480,11 @@ curl -X GET "http://localhost:5052/lighthouse/beacon/states/0/ssz" | jq ### `/lighthouse/liveness` POST request that checks if any of the given validators have attested in the given epoch. Returns a list -of objects, each including the validator index, epoch, and `is_live` status of a requested validator. +of objects, each including the validator index, epoch, and `is_live` status of a requested validator. 
-This endpoint is used in doppelganger detection, and will only provide accurate information for the -current, previous, or next epoch. +This endpoint is used in doppelganger detection, and can only provide accurate information for the current, previous, or next epoch. +> Note that for this API, if you insert an arbitrary epoch other than the previous, current or next epoch of the network, it will return `"code:400"` and `BAD_REQUEST`. ```bash curl -X POST "http://localhost:5052/lighthouse/liveness" -d '{"indices":["0","1"],"epoch":"1"}' -H "content-type: application/json" | jq @@ -442,6 +502,8 @@ curl -X POST "http://localhost:5052/lighthouse/liveness" -d '{"indices":["0","1" } ``` + + ### `/lighthouse/database/info` Information about the database's split point and anchor info. @@ -450,26 +512,29 @@ Information about the database's split point and anchor info. curl "http://localhost:5052/lighthouse/database/info" | jq ``` + ```json { - "schema_version": 5, + "schema_version": 16, "config": { - "slots_per_restore_point": 2048, + "slots_per_restore_point": 8192, + "slots_per_restore_point_set_explicitly": false, "block_cache_size": 5, "historic_state_cache_size": 1, "compact_on_init": false, - "compact_on_prune": true + "compact_on_prune": true, + "prune_payloads": true }, "split": { - "slot": "2034912", - "state_root": "0x11c8516aa7d4d1613e84121e3a557ceca34618b4c1a38f05b66ad045ff82b33b" + "slot": "5485952", + "state_root": "0xcfe5d41e6ab5a9dab0de00d89d97ae55ecaeed3b08e4acda836e69b2bef698b4" }, "anchor": { - "anchor_slot": "2034720", - "oldest_block_slot": "1958881", - "oldest_block_parent": "0x1fd3d855d03e9df28d8a41a0f9cb9d4c540832b3ca1c3e1d7e09cd75b874cc87", - "state_upper_limit": "2035712", - "state_lower_limit": "0" + "anchor_slot": "5414688", + "oldest_block_slot": "0", + "oldest_block_parent": "0x0000000000000000000000000000000000000000000000000000000000000000", + "state_upper_limit": "5414912", + "state_lower_limit": "8192" } } ``` @@ -504,12 +569,12 @@ 
Manually provide `SignedBeaconBlock`s to backfill the database. This is intended for use by Lighthouse developers during testing only. ### `/lighthouse/merge_readiness` - +Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty, An example is shown below: ```bash curl -X GET "http://localhost:5052/lighthouse/merge_readiness" | jq ``` -``` +```json { "data":{ "type":"ready", @@ -521,6 +586,21 @@ curl -X GET "http://localhost:5052/lighthouse/merge_readiness" | jq } ``` +As all testnets and Mainnet have been merged, both values will be the same after The Merge. An example of response on the Goerli testnet: + +```json +{ + "data": { + "type": "ready", + "config": { + "terminal_total_difficulty": "10790000" + }, + "current_difficulty": "10790000" + } +} +``` + + ### `/lighthouse/analysis/attestation_performance/{index}` Fetch information about the attestation performance of a validator index or all validators for a @@ -611,20 +691,35 @@ Two query parameters are required: Example: ```bash -curl -X GET "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=32" | jq +curl -X GET "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=1" | jq ``` + +The first few lines of the response would look like: + ```json [ { - "block_root": "0x51576c2fcf0ab68d7d93c65e6828e620efbb391730511ffa35584d6c30e51410", - "attestation_rewards": { - "total": 4941156, + "total": 637260, + "block_root": "0x4a089c5e390bb98e66b27358f157df825128ea953cee9d191229c0bcf423a4f6", + "meta": { + "slot": "1", + "parent_slot": "0", + "proposer_index": 93, + "graffiti": "EF #vm-eth2-raw-iron-prater-101" }, - .. - }, - .. 
-] + "attestation_rewards": { + "total": 637260, + "prev_epoch_total": 0, + "curr_epoch_total": 637260, + "per_attestation_rewards": [ + { + "50102": 780, + } + ] + } + } +] ``` Caveats: @@ -653,6 +748,8 @@ Two query parameters are required: curl -X GET "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=1&end_epoch=1" | jq ``` +An excerpt of the response looks like: + ```json [ { @@ -679,3 +776,44 @@ Caveats: This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, and loading a state on a boundary is most efficient. + +### `/lighthouse/logs` + +This is a Server Side Event subscription endpoint. This allows a user to read +the Lighthouse logs directly from the HTTP API endpoint. This currently +exposes INFO and higher level logs. It is only enabled when the `--gui` flag is set in the CLI. + +Example: + +```bash +curl -N "http://localhost:5052/lighthouse/logs" +``` + +Should provide an output that emits log events as they occur: +```json +{ +"data": { + "time": "Mar 13 15:28:41", + "level": "INFO", + "msg": "Syncing", + "service": "slot_notifier", + "est_time": "1 hr 27 mins", + "speed": "5.33 slots/sec", + "distance": "28141 slots (3 days 21 hrs)", + "peers": "8" + } +} +``` + +### `/lighthouse/nat` +Checks if the ports are open. + +```bash +curl -X GET "http://localhost:5052/lighthouse/nat" | jq +``` + +An open port will return: +```json +{ + "data": true +} \ No newline at end of file diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 80a14ae771..ee0cfd2001 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -17,8 +17,11 @@ HTTP Path | Description | [`POST /lighthouse/validators/mnemonic`](#post-lighthousevalidatorsmnemonic) | Create a new validator from an existing mnemonic. [`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators. 
+The query to Lighthouse API endpoints requires authorization; see [Authorization Header](./api-vc-auth-header.md). + In addition to the above endpoints Lighthouse also supports all of the [standard keymanager APIs](https://ethereum.github.io/keymanager-APIs/). + ## `GET /lighthouse/version` Returns the software version and `git` commit hash for the Lighthouse binary. @@ -32,15 +35,28 @@ Returns the software version and `git` commit hash for the Lighthouse binary. | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200 | -### Example Response Body +Command: +```bash +DATADIR=/var/lib/lighthouse +curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq +``` + +Example Response Body: + ```json { "data": { - "version": "Lighthouse/v0.2.11-fc0654fbe+/x86_64-linux" + "version": "Lighthouse/v4.1.0-693886b/x86_64-linux" } } ``` +> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. If you are having a permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. + +> As an alternative, you can also provide the API token directly, for example, `-H "Authorization: Bearer api-token-0x02dc2a13115cc8c83baf170f597f22b1eb2930542941ab902df3daadebcb8f8176"`. In this case, you obtain the token from the file `api-token.txt` and the command becomes: +```bash +curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer api-token-0x02dc2a13115cc8c83baf170f597f22b1eb2930542941ab902df3daadebcb8f8176" | jq +``` ## `GET /lighthouse/health`
*Note: this endpoint is presently only available on Linux.* -### Example Response Body +Command: +```bash +DATADIR=/var/lib/lighthouse +curl -X GET "http://localhost:5062/lighthouse/health" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq +``` + +Example Response Body: ```json { - "data": { - "pid": 1476293, - "pid_num_threads": 19, - "pid_mem_resident_set_size": 4009984, - "pid_mem_virtual_memory_size": 1306775552, - "sys_virt_mem_total": 33596100608, - "sys_virt_mem_available": 23073017856, - "sys_virt_mem_used": 9346957312, - "sys_virt_mem_free": 22410510336, - "sys_virt_mem_percent": 31.322334, - "sys_loadavg_1": 0.98, - "sys_loadavg_5": 0.98, - "sys_loadavg_15": 1.01 - } + "data": { + "sys_virt_mem_total": 8184274944, + "sys_virt_mem_available": 1532280832, + "sys_virt_mem_used": 6248341504, + "sys_virt_mem_free": 648790016, + "sys_virt_mem_percent": 81.27775, + "sys_virt_mem_cached": 1244770304, + "sys_virt_mem_buffers": 42373120, + "sys_loadavg_1": 2.33, + "sys_loadavg_5": 2.11, + "sys_loadavg_15": 2.47, + "cpu_cores": 4, + "cpu_threads": 8, + "system_seconds_total": 103095, + "user_seconds_total": 750734, + "iowait_seconds_total": 60671, + "idle_seconds_total": 3922305, + "cpu_time_total": 4794222, + "disk_node_bytes_total": 982820896768, + "disk_node_bytes_free": 521943703552, + "disk_node_reads_total": 376287830, + "disk_node_writes_total": 48232652, + "network_node_bytes_total_received": 143003442144, + "network_node_bytes_total_transmit": 185348289905, + "misc_node_boot_ts_seconds": 1681740973, + "misc_os": "linux", + "pid": 144072, + "pid_num_threads": 27, + "pid_mem_resident_set_size": 15835136, + "pid_mem_virtual_memory_size": 2179018752, + "pid_process_seconds_total": 54 + } } ``` @@ -91,7 +131,13 @@ Returns information regarding the health of the host machine. 
| Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200 | -### Example Response Body +Command: +```bash +DATADIR=/var/lib/lighthouse +curl -X GET "http://localhost:5062/lighthouse/ui/health" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq + ``` + +Example Response Body ```json { @@ -130,7 +176,12 @@ Returns the graffiti that will be used for the next block proposal of each valid | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200 | -### Example Response Body +Command: +```bash +DATADIR=/var/lib/lighthouse +curl -X GET "http://localhost:5062/lighthouse/ui/graffiti" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq + ``` +Example Response Body ```json { @@ -155,71 +206,115 @@ Returns the Ethereum proof-of-stake consensus specification loaded for this vali | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200 | -### Example Response Body +Command: + +```bash +DATADIR=/var/lib/lighthouse +curl -X GET "http://localhost:5062/lighthouse/spec" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq +``` + +Example Response Body ```json { - "data": { - "CONFIG_NAME": "mainnet", - "MAX_COMMITTEES_PER_SLOT": "64", - "TARGET_COMMITTEE_SIZE": "128", - "MIN_PER_EPOCH_CHURN_LIMIT": "4", - "CHURN_LIMIT_QUOTIENT": "65536", - "SHUFFLE_ROUND_COUNT": "90", - "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT": "1024", - "MIN_GENESIS_TIME": "1601380800", - "GENESIS_DELAY": "172800", - "MIN_DEPOSIT_AMOUNT": "1000000000", - "MAX_EFFECTIVE_BALANCE": "32000000000", - "EJECTION_BALANCE": "16000000000", - "EFFECTIVE_BALANCE_INCREMENT": "1000000000", - "HYSTERESIS_QUOTIENT": "4", - "HYSTERESIS_DOWNWARD_MULTIPLIER": "1", - "HYSTERESIS_UPWARD_MULTIPLIER": "5", - "PROPORTIONAL_SLASHING_MULTIPLIER": "3", - "GENESIS_FORK_VERSION": "0x00000002", - "BLS_WITHDRAWAL_PREFIX": "0x00", - "SECONDS_PER_SLOT": "12", - 
"MIN_ATTESTATION_INCLUSION_DELAY": "1", - "MIN_SEED_LOOKAHEAD": "1", - "MAX_SEED_LOOKAHEAD": "4", - "MIN_EPOCHS_TO_INACTIVITY_PENALTY": "4", - "MIN_VALIDATOR_WITHDRAWABILITY_DELAY": "256", - "SHARD_COMMITTEE_PERIOD": "256", - "BASE_REWARD_FACTOR": "64", - "WHISTLEBLOWER_REWARD_QUOTIENT": "512", - "PROPOSER_REWARD_QUOTIENT": "8", - "INACTIVITY_PENALTY_QUOTIENT": "16777216", - "MIN_SLASHING_PENALTY_QUOTIENT": "32", - "SAFE_SLOTS_TO_UPDATE_JUSTIFIED": "8", - "DOMAIN_BEACON_PROPOSER": "0x00000000", - "DOMAIN_BEACON_ATTESTER": "0x01000000", - "DOMAIN_RANDAO": "0x02000000", - "DOMAIN_DEPOSIT": "0x03000000", - "DOMAIN_VOLUNTARY_EXIT": "0x04000000", - "DOMAIN_SELECTION_PROOF": "0x05000000", - "DOMAIN_AGGREGATE_AND_PROOF": "0x06000000", - "DOMAIN_APPLICATION_MASK": "0x00000001", - "MAX_VALIDATORS_PER_COMMITTEE": "2048", - "SLOTS_PER_EPOCH": "32", - "EPOCHS_PER_ETH1_VOTING_PERIOD": "32", - "SLOTS_PER_HISTORICAL_ROOT": "8192", - "EPOCHS_PER_HISTORICAL_VECTOR": "65536", - "EPOCHS_PER_SLASHINGS_VECTOR": "8192", - "HISTORICAL_ROOTS_LIMIT": "16777216", - "VALIDATOR_REGISTRY_LIMIT": "1099511627776", - "MAX_PROPOSER_SLASHINGS": "16", - "MAX_ATTESTER_SLASHINGS": "2", - "MAX_ATTESTATIONS": "128", - "MAX_DEPOSITS": "16", - "MAX_VOLUNTARY_EXITS": "16", - "ETH1_FOLLOW_DISTANCE": "1024", - "TARGET_AGGREGATORS_PER_COMMITTEE": "16", - "RANDOM_SUBNETS_PER_VALIDATOR": "1", - "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION": "256", - "SECONDS_PER_ETH1_BLOCK": "14", - "DEPOSIT_CONTRACT_ADDRESS": "0x48b597f4b53c21b48ad95c7256b49d1779bd5890" - } + "data": { + "CONFIG_NAME": "prater", + "PRESET_BASE": "mainnet", + "TERMINAL_TOTAL_DIFFICULTY": "10790000", + "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", + "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH": "18446744073709551615", + "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY": "128", + "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT": "16384", + "MIN_GENESIS_TIME": "1614588812", + "GENESIS_FORK_VERSION": "0x00001020", + "GENESIS_DELAY": 
"1919188", + "ALTAIR_FORK_VERSION": "0x01001020", + "ALTAIR_FORK_EPOCH": "36660", + "BELLATRIX_FORK_VERSION": "0x02001020", + "BELLATRIX_FORK_EPOCH": "112260", + "CAPELLA_FORK_VERSION": "0x03001020", + "CAPELLA_FORK_EPOCH": "162304", + "SECONDS_PER_SLOT": "12", + "SECONDS_PER_ETH1_BLOCK": "14", + "MIN_VALIDATOR_WITHDRAWABILITY_DELAY": "256", + "SHARD_COMMITTEE_PERIOD": "256", + "ETH1_FOLLOW_DISTANCE": "2048", + "INACTIVITY_SCORE_BIAS": "4", + "INACTIVITY_SCORE_RECOVERY_RATE": "16", + "EJECTION_BALANCE": "16000000000", + "MIN_PER_EPOCH_CHURN_LIMIT": "4", + "CHURN_LIMIT_QUOTIENT": "65536", + "PROPOSER_SCORE_BOOST": "40", + "DEPOSIT_CHAIN_ID": "5", + "DEPOSIT_NETWORK_ID": "5", + "DEPOSIT_CONTRACT_ADDRESS": "0xff50ed3d0ec03ac01d4c79aad74928bff48a7b2b", + "MAX_COMMITTEES_PER_SLOT": "64", + "TARGET_COMMITTEE_SIZE": "128", + "MAX_VALIDATORS_PER_COMMITTEE": "2048", + "SHUFFLE_ROUND_COUNT": "90", + "HYSTERESIS_QUOTIENT": "4", + "HYSTERESIS_DOWNWARD_MULTIPLIER": "1", + "HYSTERESIS_UPWARD_MULTIPLIER": "5", + "SAFE_SLOTS_TO_UPDATE_JUSTIFIED": "8", + "MIN_DEPOSIT_AMOUNT": "1000000000", + "MAX_EFFECTIVE_BALANCE": "32000000000", + "EFFECTIVE_BALANCE_INCREMENT": "1000000000", + "MIN_ATTESTATION_INCLUSION_DELAY": "1", + "SLOTS_PER_EPOCH": "32", + "MIN_SEED_LOOKAHEAD": "1", + "MAX_SEED_LOOKAHEAD": "4", + "EPOCHS_PER_ETH1_VOTING_PERIOD": "64", + "SLOTS_PER_HISTORICAL_ROOT": "8192", + "MIN_EPOCHS_TO_INACTIVITY_PENALTY": "4", + "EPOCHS_PER_HISTORICAL_VECTOR": "65536", + "EPOCHS_PER_SLASHINGS_VECTOR": "8192", + "HISTORICAL_ROOTS_LIMIT": "16777216", + "VALIDATOR_REGISTRY_LIMIT": "1099511627776", + "BASE_REWARD_FACTOR": "64", + "WHISTLEBLOWER_REWARD_QUOTIENT": "512", + "PROPOSER_REWARD_QUOTIENT": "8", + "INACTIVITY_PENALTY_QUOTIENT": "67108864", + "MIN_SLASHING_PENALTY_QUOTIENT": "128", + "PROPORTIONAL_SLASHING_MULTIPLIER": "1", + "MAX_PROPOSER_SLASHINGS": "16", + "MAX_ATTESTER_SLASHINGS": "2", + "MAX_ATTESTATIONS": "128", + "MAX_DEPOSITS": "16", + "MAX_VOLUNTARY_EXITS": "16", + 
"INACTIVITY_PENALTY_QUOTIENT_ALTAIR": "50331648", + "MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR": "64", + "PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR": "2", + "SYNC_COMMITTEE_SIZE": "512", + "EPOCHS_PER_SYNC_COMMITTEE_PERIOD": "256", + "MIN_SYNC_COMMITTEE_PARTICIPANTS": "1", + "INACTIVITY_PENALTY_QUOTIENT_BELLATRIX": "16777216", + "MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX": "32", + "PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX": "3", + "MAX_BYTES_PER_TRANSACTION": "1073741824", + "MAX_TRANSACTIONS_PER_PAYLOAD": "1048576", + "BYTES_PER_LOGS_BLOOM": "256", + "MAX_EXTRA_DATA_BYTES": "32", + "MAX_BLS_TO_EXECUTION_CHANGES": "16", + "MAX_WITHDRAWALS_PER_PAYLOAD": "16", + "MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP": "16384", + "DOMAIN_DEPOSIT": "0x03000000", + "BLS_WITHDRAWAL_PREFIX": "0x00", + "RANDOM_SUBNETS_PER_VALIDATOR": "1", + "DOMAIN_SYNC_COMMITTEE": "0x07000000", + "TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE": "16", + "DOMAIN_BEACON_ATTESTER": "0x01000000", + "DOMAIN_VOLUNTARY_EXIT": "0x04000000", + "DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF": "0x08000000", + "DOMAIN_CONTRIBUTION_AND_PROOF": "0x09000000", + "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION": "256", + "TARGET_AGGREGATORS_PER_COMMITTEE": "16", + "DOMAIN_APPLICATION_MASK": "0x00000001", + "DOMAIN_AGGREGATE_AND_PROOF": "0x06000000", + "DOMAIN_RANDAO": "0x02000000", + "DOMAIN_SELECTION_PROOF": "0x05000000", + "DOMAIN_BEACON_PROPOSER": "0x00000000", + "SYNC_COMMITTEE_SUBNET_COUNT": "4" + } } ``` @@ -240,13 +335,13 @@ file may be read by a local user with access rights. | Required Headers | - | | Typical Responses | 200 | -### Example Path +Command: -``` -localhost:5062/lighthouse/auth +```bash +curl http://localhost:5062/lighthouse/auth | jq ``` -### Example Response Body +Example Response Body ```json { @@ -267,7 +362,14 @@ Lists all validators managed by this validator client. 
| Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200 | -### Example Response Body +Command: + +```bash +DATADIR=/var/lib/lighthouse +curl -X GET "http://localhost:5062/lighthouse/validators/" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq +``` + +Example Response Body ```json { @@ -304,13 +406,14 @@ Get a validator by their `voting_pubkey`. | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200, 400 | -### Example Path +Command: -``` -localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde +```bash +DATADIR=/var/lib/lighthouse +curl -X GET "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq ``` -### Example Response Body +Example Response Body ```json { @@ -323,7 +426,8 @@ localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc8 ## `PATCH /lighthouse/validators/:voting_pubkey` -Update some values for the validator with `voting_pubkey`. +Update some values for the validator with `voting_pubkey`. Possible fields: `enabled`, `gas_limit`, `builder_proposals`, +and `graffiti`. The following example updates a validator from `enabled: true` to `enabled: false`. ### HTTP Specification @@ -334,13 +438,8 @@ Update some values for the validator with `voting_pubkey`. 
| Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200, 400 | -### Example Path -``` -localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde -``` - -### Example Request Body +Example Request Body ```json { @@ -348,12 +447,29 @@ localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc8 } ``` +Command: + +```bash +DATADIR=/var/lib/lighthouse +curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" \ +-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ +-H "Content-Type: application/json" \ +-d "{\"enabled\":false}" | jq +``` ### Example Response Body ```json null ``` +A `null` response indicates that the request is successful. At the same time, `lighthouse vc` will log: + +``` +INFO Disabled validator voting_pubkey: 0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde +INFO Modified key_cache saved successfully +``` + + ## `POST /lighthouse/validators/` Create any number of new validators, all of which will share a common mnemonic @@ -392,6 +508,28 @@ Validators are generated from the mnemonic according to ] ``` +Command: +```bash +DATADIR=/var/lib/lighthouse +curl -X POST http://localhost:5062/lighthouse/validators \ +-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ +-H "Content-Type: application/json" \ +-d '[ + { + "enable": true, + "description": "validator_one", + "deposit_gwei": "32000000000", + "graffiti": "Mr F was here", + "suggested_fee_recipient": "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + }, + { + "enable": false, + "description": "validator two", + "deposit_gwei": "34000000000" + } +]' | jq +``` + ### Example Response Body ```json @@ -416,6 +554,14 @@ Validators are generated from the mnemonic according to ] } } +``` + + 
`lighthouse vc` will log: + +``` +INFO Enabled validator voting_pubkey: 0x8ffbc881fb60841a4546b4b385ec5e9b5090fd1c4395e568d98b74b94b41a912c6101113da39d43c101369eeb9b48e50, signing_method: local_keystore +INFO Modified key_cache saved successfully +INFO Disabled validator voting_pubkey: 0xa9fadd620dc68e9fe0d6e1a69f6c54a0271ad65ab5a509e645e45c6e60ff8f4fc538f301781193a08b55821444801502 ``` ## `POST /lighthouse/validators/keystore` @@ -474,6 +620,19 @@ Import a keystore into the validator client. } ``` +We can use [JSON to String Converter](https://jsontostring.com/) so that the above data can be properly presented as a command. The command is as below: + +Command: +```bash +DATADIR=/var/lib/lighthouse +curl -X POST http://localhost:5062/lighthouse/validators/keystore \ +-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ +-H "Content-Type: application/json" \ +-d "{\"enable\":true,\"password\":\"mypassword\",\"keystore\":{\"crypto\":{\"kdf\":{\"function\":\"scrypt\",\"params\":{\"dklen\":32,\"n\":262144,\"r\":8,\"p\":1,\"salt\":\"445989ec2f332bb6099605b4f1562c0df017488d8d7fb3709f99ebe31da94b49\"},\"message\":\"\"},\"checksum\":{\"function\":\"sha256\",\"params\":{},\"message\":\"abadc1285fd38b24a98ac586bda5b17a8f93fc1ff0778803dc32049578981236\"},\"cipher\":{\"function\":\"aes-128-ctr\",\"params\":{\"iv\":\"65abb7e1d02eec9910d04299cc73efbe\"},\"message\":\"6b7931a4447be727a3bb5dc106d9f3c1ba50671648e522f213651d13450b6417\"}},\"uuid\":\"5cf2a1fb-dcd6-4095-9ebf-7e4ee0204cab\",\"path\":\"m/12381/3600/0/0/0\",\"pubkey\":\"b0d2f05014de27c6d7981e4a920799db1c512ee7922932be6bf55729039147cf35a090bd4ab378fe2d133c36cbbc9969\",\"version\":4,\"description\":\"\"}}" | jq +``` + +As this is an example for demonstration, the above command will return `InvalidPassword`. However, with a keystore file and correct password, running the above command will import the keystore to the validator client. 
An example of a success message is shown below: + ### Example Response Body ```json { @@ -484,6 +643,13 @@ Import a keystore into the validator client. } } +``` + + `lighthouse vc` will log: + +```bash +INFO Enabled validator voting_pubkey: 0xb0d2f05014de27c6d7981e4a920799db1c512ee7922932be6bf55729039147cf35a090bd4ab378fe2d133c36cbb, signing_method: local_keystore +INFO Modified key_cache saved successfully ``` ## `POST /lighthouse/validators/mnemonic` @@ -521,6 +687,16 @@ generated with the path `m/12381/3600/i/42`. } ``` +Command: + +```bash +DATADIR=/var/lib/lighthouse +curl -X POST http://localhost:5062/lighthouse/validators/mnemonic \ +-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ +-H "Content-Type: application/json" \ +-d '{"mnemonic":" theme onion deal plastic claim silver fancy youth lock ordinary hotel elegant balance ridge web skill burger survey demand distance legal fish salad cloth","key_derivation_path_offset":0,"validators":[{"enable":true,"description":"validator_one","deposit_gwei":"32000000000"}]}' | jq +``` + ### Example Response Body ```json @@ -537,6 +713,13 @@ generated with the path `m/12381/3600/i/42`. 
} ``` +`lighthouse vc` will log: + +``` +INFO Enabled validator voting_pubkey: 0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380, signing_method: local_keystore +INFO Modified key_cache saved successfully +``` + ## `POST /lighthouse/validators/web3signer` Create any number of new validators, all of which will refer to a @@ -575,6 +758,56 @@ The following fields may be omitted or nullified to obtain default values: - `root_certificate_path` - `request_timeout_ms` +Command: +```bash +DATADIR=/var/lib/lighthouse +curl -X POST http://localhost:5062/lighthouse/validators/web3signer \ +-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ +-H "Content-Type: application/json" \ +-d "[{\"enable\":true,\"description\":\"validator_one\",\"graffiti\":\"Mr F was here\",\"suggested_fee_recipient\":\"0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d\",\"voting_public_key\":\"0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380\",\"url\":\"http://path-to-web3signer.com\",\"request_timeout_ms\":12000}]" +``` + + ### Example Response Body -*No data is included in the response body.* + +```json +null +``` + +A `null` response indicates that the request is successful. At the same time, `lighthouse vc` will log: + +``` +INFO Enabled validator voting_pubkey: 0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380, signing_method: remote_signer +``` + + +## `GET /lighthouse/logs` + +Provides a subscription to receive logs as Server Side Events. Currently the +logs emitted are INFO level or higher. 
+ +### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/logs` | +| Method | GET | +| Required Headers | None | +| Typical Responses | 200 | + +### Example Response Body + +```json +{ + "data": { + "time": "Mar 13 15:26:53", + "level": "INFO", + "msg": "Connected to beacon node(s)", + "service": "notifier", + "synced": 1, + "available": 1, + "total": 1 + } +} +``` diff --git a/book/src/api-vc.md b/book/src/api-vc.md index 74c493ebea..a3400016ec 100644 --- a/book/src/api-vc.md +++ b/book/src/api-vc.md @@ -14,13 +14,13 @@ signers. It also includes some Lighthouse-specific endpoints which are described ## Starting the server -A Lighthouse validator client can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `127.0.0.1:5062`. +A Lighthouse validator client can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `http://127.0.0.1:5062`. The following CLI flags control the HTTP server: - `--http`: enable the HTTP server (required even if the following flags are provided). -- `--http-address`: specify the listen address of the server. It is almost always unsafe to use a non-default HTTP listen address. Use with caution. See the **Security** section below for more information. +- `--http-address`: specify the listen address of the server. It is almost always unsafe to use a non-default HTTP listen address. Use this with caution. See the **Security** section below for more information. - `--http-port`: specify the listen port of the server. - `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` header. The default is to not supply a header. @@ -28,7 +28,7 @@ The following CLI flags control the HTTP server: ## Security The validator client HTTP server is **not encrypted** (i.e., it is **not HTTPS**). For -this reason, it will listen by default on `127.0.0.1`. 
+this reason, it will listen by default on `http://127.0.0.1`. It is unsafe to expose the validator client to the public Internet without additional transport layer security (e.g., HTTPS via nginx, SSH tunnels, etc.). diff --git a/book/src/builders.md b/book/src/builders.md index fc42f9b743..6db360d70e 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -1,4 +1,4 @@ -# MEV and Lighthouse +# Maximal Extractable Value (MEV) Lighthouse is able to interact with servers that implement the [builder API](https://github.com/ethereum/builder-specs), allowing it to produce blocks without having @@ -103,11 +103,32 @@ Each field is optional. } ``` +Command: + +```bash +DATADIR=/var/lib/lighthouse +curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" \ +-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ +-H "Content-Type: application/json" \ +-d '{ + "builder_proposals": true, + "gas_limit": 30000001 +}' | jq +``` +If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"` + #### Example Response Body ```json null ``` + +A `null` response indicates that the request is successful. At the same time, `lighthouse vc` will show a log which looks like: + +``` +INFO Published validator registrations to the builder network, count: 3, service: preparation +``` + ### Fee Recipient Refer to [suggested fee recipient](suggested-fee-recipient.md) documentation. @@ -167,9 +188,18 @@ consider using it for the chance of out-sized rewards, this flag may be useful: The number provided indicates the minimum reward that an external payload must provide the proposer for it to be considered for inclusion in a proposal. 
For example, if you'd only like to use an external payload for a reward of >= 0.25 ETH, you would provide your beacon node with `--builder-profit-threshold 250000000000000000`. If it's your turn to propose and the -most valuable payload offered by builders is only 0.1 ETH, the local execution engine's payload will be used. Currently, -this threshold just looks at the value of the external payload. No comparison to the local payload is made, although -this feature will likely be added in the future. +most valuable payload offered by builders is only 0.1 ETH, the local execution engine's payload will be used. + +Since the [Capella](https://ethereum.org/en/history/#capella) upgrade, a comparison of the external payload and local payload will be made according to the [engine_getPayloadV2](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_getpayloadv2) API. The logic is as follows: + +``` +if local payload value >= builder payload value: + use local payload +else if builder payload value >= builder_profit_threshold or builder_profit_threshold == 0: + use builder payload +else: + use local payload +``` ## Checking your builder config diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 47dc03b20c..d5c8b18e57 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -34,7 +34,7 @@ INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e54 ``` > **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint -> against a trusted source like a friend's node, or a block explorer. +> against a trusted source like a friend's node, a block explorer or some [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/). Once the checkpoint is loaded Lighthouse will sync forwards to the head of the chain. 
@@ -62,6 +62,10 @@ INFO Downloading historical blocks est_time: 5 hrs 0 mins, speed: 111.96 slots/ Once backfill is complete, a `INFO Historical block download complete` log will be emitted. +> Note: Since [v4.1.0](https://github.com/sigp/lighthouse/releases/tag/v4.1.0), Lighthouse implements rate-limited backfilling to mitigate validator performance issues after a recent checkpoint sync. This means that the speed at which historical blocks are downloaded is limited, typically to less than 20 slots/sec. This will not affect validator performance. However, if you would still prefer to sync the chain as fast as possible, you can add the flag `--disable-backfill-rate-limiting` to the beacon node. + +> Note: Since [v4.2.0](https://github.com/sigp/lighthouse/releases/tag/v4.2.0), Lighthouse limits the backfill sync to only sync backwards to the weak subjectivity point (approximately 5 months). This will help to save disk space. However, if you would like to sync back to the genesis, you can add the flag `--genesis-backfill` to the beacon node. + ## FAQ 1. What if I have an existing database? How can I use checkpoint sync? diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 5e0b896359..9b60ca2e18 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -29,6 +29,7 @@ validator client or the slasher**. | v3.4.0 | Jan 2023 | v13 | yes | | v3.5.0 | Feb 2023 | v15 | yes before Capella | | v4.0.1 | Mar 2023 | v16 | yes before Capella | +| v4.2.0 | May 2023 | v17 | yes | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). @@ -82,24 +83,36 @@ on downgrades above. 
To check the schema version of a running Lighthouse instance you can use the HTTP API: ```bash -curl "http://localhost:5052/lighthouse/database/info" +curl "http://localhost:5052/lighthouse/database/info" | jq ``` ```json { - "schema_version": 8, + "schema_version": 16, "config": { "slots_per_restore_point": 8192, - "slots_per_restore_point_set_explicitly": true, + "slots_per_restore_point_set_explicitly": false, "block_cache_size": 5, "historic_state_cache_size": 1, "compact_on_init": false, - "compact_on_prune": true + "compact_on_prune": true, + "prune_payloads": true + }, + "split": { + "slot": "5485952", + "state_root": "0xcfe5d41e6ab5a9dab0de00d89d97ae55ecaeed3b08e4acda836e69b2bef698b4" + }, + "anchor": { + "anchor_slot": "5414688", + "oldest_block_slot": "0", + "oldest_block_parent": "0x0000000000000000000000000000000000000000000000000000000000000000", + "state_upper_limit": "5414912", + "state_lower_limit": "8192" } } ``` -The `schema_version` key indicates that this database is using schema version 8. +The `schema_version` key indicates that this database is using schema version 16. Alternatively, you can check the schema version with the `lighthouse db` command. @@ -118,7 +131,7 @@ Several conditions need to be met in order to run `lighthouse db`: 2. The command must run as the user that owns the beacon node database. If you are using systemd then your beacon node might run as a user called `lighthousebeacon`. 3. The `--datadir` flag must be set to the location of the Lighthouse data directory. -4. The `--network` flag must be set to the correct network, e.g. `mainnet`, `prater` or `sepolia`. +4. The `--network` flag must be set to the correct network, e.g. `mainnet`, `goerli` or `sepolia`. 
The general form for a `lighthouse db` command is: diff --git a/book/src/faq.md b/book/src/faq.md index b42e197a00..d3e25438a7 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -1,29 +1,185 @@ # Frequently Asked Questions -- [Why does it take so long for a validator to be activated?](#why-does-it-take-so-long-for-a-validator-to-be-activated) -- [Do I need to set up any port mappings?](#do-i-need-to-set-up-any-port-mappings) -- [I have a low peer count and it is not increasing](#i-have-a-low-peer-count-and-it-is-not-increasing) -- [What should I do if I lose my slashing protection database?](#what-should-i-do-if-i-lose-my-slashing-protection-database) -- [How do I update lighthouse?](#how-do-i-update-lighthouse) -- [I can't compile lighthouse](#i-cant-compile-lighthouse) -- [What is "Syncing deposit contract block cache"?](#what-is-syncing-deposit-contract-block-cache) -- [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup) -- [How can I monitor my validators?](#how-can-i-monitor-my-validators) -- [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#i-see-beacon-logs-showing-warn-execution-engine-called-failed-what-should-i-do) -- [How do I check or update my withdrawal credentials?](#how-do-i-check-or-update-my-withdrawal-credentials) -- [I am missing attestations. Why?](#i-am-missing-attestations-why) -- [Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?](#sometimes-i-miss-the-attestation-head-vote-resulting-in-penalty-is-this-normal) -- [My beacon node is stuck at downloading historical block using checkpoing sync. 
What can I do?](#my-beacon-node-is-stuck-at-downloading-historical-block-using-checkpoing-sync-what-can-i-do) +## [Beacon Node](#beacon-node-1) +- [I see a warning about "Syncing deposit contract block cache" or an error about "updating deposit contract cache", what should I do?](#bn-deposit-contract) +- [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#bn-ee) +- [My beacon node is stuck at downloading historical block using checkpoint sync. What should I do?](#bn-download-historical) +- [I proposed a block but the beacon node shows `could not publish message` with error `duplicate` as below, should I be worried?](#bn-duplicate) +- [I see beacon node logs `Head is optimistic` and I am missing attestations. What should I do?](#bn-optimistic) +- [My beacon node logs `CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout`, what should I do?](#bn-timeout) +- [My beacon node logs `WARN BlockProcessingFailure outcome: MissingBeaconBlock`, what should I do?](#bn-missing-beacon) +- [After checkpoint sync, the progress of `downloading historical blocks` is slow. Why?](#bn-download-slow) +- [My beacon node logs `WARN Error processing HTTP API request`, what should I do?](#bn-http) -### Why does it take so long for a validator to be activated? +## [Validator](#validator-1) +- [Why does it take so long for a validator to be activated?](#vc-activation) +- [Can I use redundancy in my staking setup?](#vc-redundancy) +- [I am missing attestations. Why?](#vc-missed-attestations) +- [Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?](#vc-head-vote) +- [Can I submit a voluntary exit message without a beacon node?](#vc-exit) +- [Does increasing the number of validators increase the CPU and other computer resources used?](#vc-resource) +- [I want to add new validators. 
Do I have to reimport the existing keys?](#vc-reimport) +- [Do I have to stop `lighthouse vc` the when importing new validator keys?](#vc-import) + + +## [Network, Monitoring and Maintenance](#network-monitoring-and-maintenance-1) +- [I have a low peer count and it is not increasing](#net-peer) +- [How do I update lighthouse?](#net-update) +- [Do I need to set up any port mappings (port forwarding)?](#net-port-forwarding) +- [How can I monitor my validators?](#net-monitor) +- [My beacon node and validator client are on different servers. How can I point the validator client to the beacon node?](#net-bn-vc) +- [Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?](#net-ip) +- [How to change the TCP/UDP port 9000 that Lighthouse listens on?](#net-port) + + +## [Miscellaneous](#miscellaneous-1) +- [What should I do if I lose my slashing protection database?](#misc-slashing) +- [I can't compile lighthouse](#misc-compile) +- [How do I check the version of Lighthouse that is running?](#misc-version) +- [Does Lighthouse have pruning function like the execution client to save disk space?](#misc-prune) +- [Can I use a HDD for the freezer database and only have the hot db on SSD?](#misc-freezer) + +## Beacon Node + + + +### I see a warning about "Syncing deposit contract block cache" or an error about "updating deposit contract cache", what should I do? + +The error can be a warning: + +``` +Nov 30 21:04:28.268 WARN Syncing deposit contract block cache est_blocks_remaining: initializing deposits, service: slot_notifier +``` + +or an error: + +``` +ERRO Error updating deposit contract cache error: Failed to get remote head and new block ranges: EndpointError(FarBehind), retry_millis: 60000, service: deposit_contract_rpc +``` + +This log indicates that your beacon node is downloading blocks and deposits +from your execution node. 
When the `est_blocks_remaining` is +`initializing_deposits`, your node is downloading deposit logs. It may stay in +this stage for several minutes. Once the deposits logs are finished +downloading, the `est_blocks_remaining` value will start decreasing. + +It is perfectly normal to see this log when starting a node for the first time +or after being off for more than several minutes. + +If this log continues appearing during operation, it means your execution client is still syncing and it cannot provide Lighthouse the information about the deposit contract yet. What you need to do is to make sure that the execution client is up and syncing. Once the execution client is synced, the error will disappear. + +### I see beacon logs showing `WARN: Execution engine called failed`, what should I do? + +The `WARN Execution engine called failed` log is shown when the beacon node cannot reach the execution engine. When this warning occurs, it will be followed by a detailed message. A frequently encountered example of the error message is: + +`error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec` + +which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flag `--execution-timeout-multiplier 3` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: +1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. +1. The computer is overloaded. 
Check the CPU and RAM usage to see if it is overloaded. You can use `htop` to check for CPU and RAM usage.
+1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep up with the network and you may want to consider upgrading to a better SSD.
+
+If the reason for the error message is caused by no. 1 above, you may want to look further. If the execution engine is out of sync suddenly, it is usually caused by ungraceful shutdown. The common causes for ungraceful shutdown are:
+- Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services.
+- The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates.
+- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. When this occurs, the log file will show `Main process exited, code=killed, status=9/KILL`. You can also run `sudo journalctl -a --since "18 hours ago" | grep -i "killed process"` to confirm that the execution client has been killed due to oom. If you are using geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer.
+
+### My beacon node is stuck at downloading historical block using checkpoint sync. What should I do?
+
+After checkpoint forwards sync completes, the beacon node will start to download historical blocks.
The log will look like:
+
+```bash
+INFO Downloading historical blocks est_time: --, distance: 4524545 slots (89 weeks 5 days), service: slot_notifier
+```
+
+If the same log appears every minute and you do not see progress in downloading historical blocks, you can try one of the following:
+
+ - Check the number of peers you are connected to. If you have a low peer count (fewer than 50), try to do port forwarding on port 9000 TCP/UDP to increase peer count.
+ - Restart the beacon node.
+
+
+### I proposed a block but the beacon node shows `could not publish message` with error `duplicate` as below, should I be worried?
+
+```
+INFO Block from HTTP API already known
+WARN Could not publish message error: Duplicate, service: libp2p
+```
+
+This error usually happens when users are running mev-boost. The relay will publish the block on the network before returning it to you. After the relay publishes the block on the network, it will propagate through nodes, and it happens quite often that your node will receive the block from your connected peers via gossip first, before getting the block from the relay, hence the message `duplicate`.
+
+In short, it is nothing to worry about.
+
+### I see beacon node logs `Head is optimistic`, and I am missing attestations. What should I do?
+
+The log looks like:
+
+```
+WARN Head is optimistic execution_block_hash: 0x47e7555f1d4215d1ad409b1ac188b008fcb286ed8f38d3a5e8078a0af6cbd6e1, info: chain not fully verified, block and attestation production disabled until execution engine syncs, service: slot_notifier
+```
+
+It means the beacon node will follow the chain, but it will not be able to attest or produce blocks. This is because the execution client is not synced, so the beacon chain cannot verify the authenticity of the chain head, hence the word `optimistic`. What you need to do is to make sure that the execution client is up and syncing. Once the execution client is synced, the error will disappear.
+ +### My beacon node logs `CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout, service: beacon`, what should I do? + +An example of the log is shown below: + +``` +CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout, service: beacon +WARN BlockProcessingFailure outcome: ValidatorPubkeyCacheLockTimeout, msg: unexpected condition in processing block. +``` + +A `Timeout` error suggests that the computer may be overloaded at the moment, for example, the execution client is still syncing. You may use the flag `--disable-lock-timeouts` to silence this error, although it will not fix the underlying slowness. Nevertheless, this is a relatively harmless log, and the error should go away once the resources used are back to normal. + +### My beacon node logs `WARN BlockProcessingFailure outcome: MissingBeaconBlock`, what should I do? + +An example of the full log is shown below: + +``` +WARN BlockProcessingFailure outcome: MissingBeaconBlock(0xbdba211f8d72029554e405d8e4906690dca807d1d7b1bc8c9b88d7970f1648bc), msg: unexpected condition in processing block. +``` + +`MissingBeaconBlock` suggests that the database has corrupted. You should wipe the database and use [Checkpoint Sync](./checkpoint-sync.md) to resync the beacon chain. + +### After checkpoint sync, the progress of `downloading historical blocks` is slow. Why? + +This is a normal behaviour. Since [v4.1.0](https://github.com/sigp/lighthouse/releases/tag/v4.1.0), Lighthouse implements rate-limited backfill sync to mitigate validator performance issues after a checkpoint sync. This is not something to worry about since backfill sync / historical data is not required for staking. However, if you opt to sync the chain as fast as possible, you can add the flag `--disable-backfill-rate-limiting` to the beacon node. + +### My beacon node logs `WARN Error processing HTTP API request`, what should I do? + +This warning usually comes with an http error code. 
Some examples are given below: + +1. The log shows: + +``` +WARN Error processing HTTP API request method: GET, path: /eth/v1/validator/attestation_data, status: 500 Internal Server Error, elapsed: 305.65µs +``` + +The error is `500 Internal Server Error`. This suggests that the execution client is not synced. Once the execution client is synced, the error will disappear. + +2. The log shows: + +``` +WARN Error processing HTTP API request method: POST, path: /eth/v1/validator/duties/attester/199565, status: 503 Service Unavailable, elapsed: 96.787µs +``` + +The error is `503 Service Unavailable`. This means that the beacon node is still syncing. When this happens, the validator client will log: + +``` +ERRO Failed to download attester duties err: FailedToDownloadAttesters("Some endpoints failed, num_failed: 2 http://localhost:5052/ => Unavailable(NotSynced), http://localhost:5052/ => RequestFailed(ServerMessage(ErrorMessage { code: 503, message: \"SERVICE_UNAVAILABLE: beacon node is syncing +``` + +This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. + +## Validator + +### Why does it take so long for a validator to be activated? After validators create their execution layer deposit transaction there are two waiting periods before they can start producing blocks and attestations: 1. Waiting for the beacon chain to recognise the execution layer block containing the - deposit (generally 4 to 7.4 hours). -1. Waiting in the queue for validator activation (generally 6.4 minutes for - every 4 validators in the queue). + deposit (generally takes ~13.6 hours). +1. Waiting in the queue for validator activation. 
Detailed answers below: @@ -32,33 +188,33 @@ Detailed answers below: Since the beacon chain uses the execution layer for validator on-boarding, beacon chain validators must listen to event logs from the deposit contract. Since the latest blocks of the execution chain are vulnerable to re-orgs due to minor network -partitions, beacon nodes follow the execution chain at a distance of 1,024 blocks -(~4 hours) (see -[`ETH1_FOLLOW_DISTANCE`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/validator.md#misc)). +partitions, beacon nodes follow the execution chain at a distance of 2048 blocks +(~6.8 hours) (see +[`ETH1_FOLLOW_DISTANCE`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#process-deposit)). This follow distance protects the beacon chain from on-boarding validators that are likely to be removed due to an execution chain re-org. -Now we know there's a 4 hours delay before the beacon nodes even _consider_ an +Now we know there's a 6.8 hours delay before the beacon nodes even _consider_ an execution layer block. Once they _are_ considering these blocks, there's a voting period where beacon validators vote on which execution block hash to include in the beacon chain. This -period is defined as 32 epochs (~3.4 hours, see -[`ETH1_VOTING_PERIOD`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#time-parameters)). +period is defined as 64 epochs (~6.8 hours, see +[`ETH1_VOTING_PERIOD`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#time-parameters)). 
During this voting period, each beacon block producer includes an -[`Eth1Data`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#eth1data) +[`Eth1Data`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#eth1data) in their block which counts as a vote towards what that validator considers to be the head of the execution chain at the start of the voting period (with respect to `ETH1_FOLLOW_DISTANCE`, of course). You can see the exact voting logic -[here](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/validator.md#eth1-data). +[here](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#eth1-data). These two delays combined represent the time between an execution layer deposit being included in an execution data vote and that validator appearing in the beacon chain. -The `ETH1_FOLLOW_DISTANCE` delay causes a minimum delay of ~4 hours and +The `ETH1_FOLLOW_DISTANCE` delay causes a minimum delay of ~6.8 hours and `ETH1_VOTING_PERIOD` means that if a validator deposit happens just _before_ the start of a new voting period then they might not notice this delay at all. However, if the validator deposit happens just _after_ the start of the new -voting period the validator might have to wait ~3.4 hours for next voting -period. In times of very, very severe network issues, the network may even fail -to vote in new execution layer blocks, stopping all new validator deposits! +voting period the validator might have to wait ~6.8 hours for next voting +period. In times of very severe network issues, the network may even fail +to vote in new execution layer blocks, thus stopping all new validator deposits and causing the wait to be longer. #### 2. Waiting for a validator to be activated @@ -68,30 +224,144 @@ They will simply be forgotten by the beacon chain! 
But, if those parameters were correct, once the execution layer delays have elapsed and the validator appears in the beacon chain, there's _another_ delay before the validator becomes "active" (canonical definition -[here](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_active_validator)) and can start producing blocks and attestations. +[here](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#is_active_validator)) and can start producing blocks and attestations. Firstly, the validator won't become active until their beacon chain balance is equal to or greater than -[`MAX_EFFECTIVE_BALANCE`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#gwei-values) +[`MAX_EFFECTIVE_BALANCE`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#gwei-values) (32 ETH on mainnet, usually 3.2 ETH on testnets). Once this balance is reached, the validator must wait until the start of the next epoch (up to 6.4 minutes) for the -[`process_registry_updates`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#registry-updates) +[`process_registry_updates`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#registry-updates) routine to run. This routine activates validators with respect to a [churn -limit](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_validator_churn_limit); +limit](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#get_validator_churn_limit); it will only allow the number of validators to increase (churn) by a certain -amount. Up until there are about 330,000 validators this churn limit is set to -4 and it starts to very slowly increase as the number of validators increases -from there. - -If a new validator isn't within the churn limit from the front of the queue, +amount. 
If a new validator isn't within the churn limit from the front of the queue, they will need to wait another epoch (6.4 minutes) for their next chance. This -repeats until the queue is cleared. +repeats until the queue is cleared. The churn limit is summarised in the table below: -Once a validator has been activated, there's no more waiting! It's time to +
+ +| Number of active validators | Validators activated per epoch | Validators activated per day | +|-------------------|--------------------------------------------|----| +| 327679 or less | 4 | 900 | +| 327680-393215 | 5 | 1125 | +| 393216-458751 | 6 | 1350 | +| 458752-524287 | 7 | 1575 | +| 524288-589823 | 8 | 1800 | +| 589824-655359 | 9 | 2025 | +| 655360-720895 | 10 | 2250 | +| 720896-786431 | 11 | 2475 | +| 786432-851967 | 12 | 2700 | +| 851968-917503 | 13 | 2925 | +| 917504-983039 | 14 | 3150 | +| 983040-1048575 | 15 | 3375 | + +
+ +For example, the number of active validators on Mainnet is about 574000 in May 2023. This means that 8 validators can be activated per epoch or 1800 per day (it is noted that the same applies to the exit queue). If, for example, there are 9000 validators waiting to be activated, this means that the waiting time can take up to 5 days. + +Once a validator has been activated, congratulations! It's time to produce blocks and attestations! -### Do I need to set up any port mappings? +### Can I use redundancy in my staking setup? + +You should **never** use duplicate/redundant validator keypairs or validator clients (i.e., don't +duplicate your JSON keystores and don't run `lighthouse vc` twice). This will lead to slashing. + +However, there are some components which can be configured with redundancy. See the +[Redundancy](./redundancy.md) guide for more information. + +### I am missing attestations. Why? +The first thing is to ensure both consensus and execution clients are synced with the network. If they are synced, there may still be some issues with the node setup itself that is causing the missed attestations. Check the setup to ensure that: +- the clock is synced +- the computer has sufficient resources and is not overloaded +- the internet is working well +- you have sufficient peers + +You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missed attestations should be a rare occurrence. + +### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? + +In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes.
Your attestation performance does not only depend on your own setup, but also on everyone else's performance. + +### Can I submit a voluntary exit message without running a beacon node? + +Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/voluntary-exit.md). + +It is also noted that you can submit your BLS-to-execution-change message to update your withdrawal credentials from type `0x00` to `0x01` using the same link. + +If you would like to still use Lighthouse to submit the message, you will need to run a beacon node and an execution client. For the beacon node, you can use checkpoint sync to quickly sync the chain in under a minute. On the other hand, the execution client can be syncing and *need not be synced*. This implies that it is possible to broadcast a voluntary exit message within a short time by quickly spinning up a node. + +### Does increasing the number of validators increase the CPU and other computer resources used? + +A computer with hardware specifications stated in the [Recommended System Requirements](./installation.md#recommended-system-requirements) can run hundreds of validators with only marginal increase in CPU usage. When validators are active, there is a bit of an increase in resources used from validators 0-64, because you end up subscribed to more subnets. After that, the increase in resources plateaus when the number of validators goes from 64 to ~500. + +### I want to add new validators. Do I have to reimport the existing keys? + +No. You can just import new validator keys to the destination directory.
If the `validator_keys` folder contains existing keys, that's fine as well because Lighthouse will skip importing existing keys. + +### Do I have to stop `lighthouse vc` when importing new validator keys? + +Generally yes. + +If you do not want to stop `lighthouse vc`, you can use the [key manager API](./api-vc-endpoints.md) to import keys. + +## Network, Monitoring and Maintenance + +### I have a low peer count and it is not increasing + +If you cannot find *ANY* peers at all, it is likely that you have incorrect +network configuration settings. Ensure that the network you wish to connect to +is correct (the beacon node outputs the network it is connecting to in the +initial boot-up log lines). On top of this, ensure that you are not using the +same `datadir` as a previous network, i.e., if you have been running the +`Goerli` testnet and are now trying to join a new network but using the same +`datadir` (the `datadir` is also printed out in the beacon node's logs on +boot-up). + +If you find yourself with a low peer count and it's not reaching the target you +expect, there are a few things to check on: + +1. Ensure that port forward was correctly set up as described [here](./advanced_networking.md#nat-traversal-port-forwarding). + +To check that the ports are forwarded, run the command: + + ```bash + curl http://localhost:5052/lighthouse/nat + ``` + +It should return `{"data":true}`. If it returns `{"data":false}`, you may want to double check if the port forward was correctly set up. + +If the ports are open, you should have incoming peers. To check that you have incoming peers, run the command: + + ```bash + curl localhost:5052/lighthouse/peers | jq '.[] | select(.peer_info.connection_direction=="Incoming")' + ``` + +If you have incoming peers, it should return a lot of data containing information of peers. If the response is empty, it means that you have no incoming peers and there the ports are not open. 
You may want to double check if the port forward was correctly set up. + +2. Check that you do not lower the number of peers using the flag `--target-peers`. The default is 80. A lower value set will lower the maximum number of peers your node can connect to, which may potentially interrupt the validator performance. We recommend users to leave the `--target-peers` untouched to keep a diverse set of peers. + +3. Ensure that you have a quality router for the internet connection. For example, if you connect the router to many devices including the node, it may be possible that the router cannot handle all routing tasks, hence struggling to keep up the number of peers. Therefore, using a quality router for the node is important to keep a healthy number of peers. + + +### How do I update lighthouse? + +If you are updating to new release binaries, it will be the same process as described [here.](./installation-binaries.md) + +If you are updating by rebuilding from source, see [here.](./installation-source.md#update-lighthouse) + +If you are running the docker image provided by Sigma Prime on Dockerhub, you can update to specific versions, for example: + +```bash +$ docker pull sigp/lighthouse:v1.0.0 +``` + +If you are building a docker image, the process will be similar to the one described [here.](./docker.md#building-the-docker-image) +You just need to make sure the code you have checked out is up to date. + +### Do I need to set up any port mappings (port forwarding)? It is not strictly required to open any ports for Lighthouse to connect and participate in the network. Lighthouse should work out-of-the-box. However, if @@ -116,121 +386,108 @@ peers to join and degrades the overall connectivity of the global network. For these reasons, we recommend that you make your node publicly accessible. Lighthouse supports UPnP.
If you are behind a NAT with a router that supports -UPnP you can simply ensure UPnP is enabled (Lighthouse will inform you in its -initial logs if a route has been established). You can also manually set up -port mappings in your router to your local Lighthouse instance. By default, +UPnP, you can simply ensure UPnP is enabled (Lighthouse will inform you in its +initial logs if a route has been established). You can also manually [set up port mappings/port forwarding](./advanced_networking.md#how-to-open-ports) in your router to your local Lighthouse instance. By default, Lighthouse uses port 9000 for both TCP and UDP. Opening both these ports will make your Lighthouse node maximally contactable. -### I have a low peer count and it is not increasing - -If you cannot find *ANY* peers at all. It is likely that you have incorrect -testnet configuration settings. Ensure that the network you wish to connect to -is correct (the beacon node outputs the network it is connecting to in the -initial boot-up log lines). On top of this, ensure that you are not using the -same `datadir` as a previous network. I.e if you have been running the -`prater` testnet and are now trying to join a new testnet but using the same -`datadir` (the `datadir` is also printed out in the beacon node's logs on -boot-up). - -If you find yourself with a low peer count and it's not reaching the target you -expect. Try setting up the correct port forwards as described -[here](./advanced_networking.md#nat-traversal-port-forwarding). - -### What should I do if I lose my slashing protection database? - -See [here](./slashing-protection.md#misplaced-slashing-database). - -### How do I update lighthouse? 
- -If you are updating to new release binaries, it will be the same process as described [here.](./installation-binaries.md) - -If you are updating by rebuilding from source, see [here.](./installation-source.md#update-lighthouse) - -If you are running the docker image provided by Sigma Prime on Dockerhub, you can update to specific versions, for example: - -```bash -$ docker pull sigp/lighthouse:v1.0.0 -``` - -If you are building a docker image, the process will be similar to the one described [here.](./docker.md#building-the-docker-image) -You will just also need to make sure the code you have checked out is up to date. - -### I can't compile lighthouse - -See [here.](./installation-source.md#troubleshooting) - -### What is "Syncing deposit contract block cache"? - -``` -Nov 30 21:04:28.268 WARN Syncing deposit contract block cache est_blocks_remaining: initializing deposits, service: slot_notifier -``` - -This log indicates that your beacon node is downloading blocks and deposits -from your execution node. When the `est_blocks_remaining` is -`initializing_deposits`, your node is downloading deposit logs. It may stay in -this stage for several minutes. Once the deposits logs are finished -downloading, the `est_blocks_remaining` value will start decreasing. - -It is perfectly normal to see this log when starting a node for the first time -or after being off for more than several minutes. - -If this log continues appearing sporadically during operation, there may be an -issue with your execution client endpoint. - -### Can I use redundancy in my staking setup? - -You should **never** use duplicate/redundant validator keypairs or validator clients (i.e., don't -duplicate your JSON keystores and don't run `lighthouse vc` twice). This will lead to slashing. - -However, there are some components which can be configured with redundancy. See the -[Redundancy](./redundancy.md) guide for more information. - -### How can I monitor my validators? 
+### How can I monitor my validators? Apart from using block explorers, you may use the "Validator Monitor" built into Lighthouse which provides logging and Prometheus/Grafana metrics for individual validators. See [Validator Monitoring](./validator-monitoring.md) for more information. Lighthouse has also developed Lighthouse UI (Siren) to monitor performance, see [Lighthouse UI (Siren)](./lighthouse-ui.md). -### I see beacon logs showing `WARN: Execution engine called failed`, what should I do? +### My beacon node and validator client are on different servers. How can I point the validator client to the beacon node? + +The settings are as follows: + +1. On the beacon node: + +   Specify `lighthouse bn --http-address local_IP` so that the beacon node is listening on the local network rather than on the `localhost`. + +1. On the validator client: + +   Use the flag `--beacon-nodes` to point to the beacon node. For example, `lighthouse vc --beacon-nodes http://local_IP:5052` where `local_IP` is the local IP address of the beacon node and `5052` is the default `http-port` of the beacon node. + +   You can test that the setup is working by running the following command on the validator client host: + +   ```bash +   curl "http://local_IP:5052/eth/v1/node/version" +   ``` + +   You can refer to [Redundancy](./redundancy.md) for more information. + +   It is also worth noting that the `--beacon-nodes` flag can also be used for redundancy of beacon nodes. For example, let's say you have a beacon node and a validator client running on the same host, and a second beacon node on another server as a backup. In this case, you can use `lighthouse vc --beacon-nodes http://localhost:5052,http://local_IP:5052` on the validator client. + +### Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address? +No. Lighthouse will auto-detect the change and update your Ethereum Node Record (ENR).
You just need to make sure you are not manually setting the ENR with `--enr-address` (which, for common use cases, is not used). + +### How to change the TCP/UDP port 9000 that Lighthouse listens on? +Use the flag ```--port ``` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., ```--port 9001```. + +## Miscellaneous + +### What should I do if I lose my slashing protection database? + +See [here](./slashing-protection.md#misplaced-slashing-database). + +### I can't compile lighthouse + +See [here.](./installation-source.md#troubleshooting) + +### How do I check the version of Lighthouse that is running? + +If you build Lighthouse from source, run `lighthouse --version`. Example of output: + +```bash +Lighthouse v4.1.0-693886b +BLS library: blst-modern +SHA256 hardware acceleration: false +Allocator: jemalloc +Specs: mainnet (true), minimal (false), gnosis (true) +``` + +If you download the binary file, navigate to the location of the directory, for example, the binary file is in `/usr/local/bin`, run `/usr/local/bin/lighthouse --version`, the example of output is the same as above. + +Alternatively, if you have Lighthouse running on the same computer, you can run: +```bash +curl "http://127.0.0.1:5052/eth/v1/node/version" +``` + +Example of output: +```bash +{"data":{"version":"Lighthouse/v4.1.0-693886b/x86_64-linux"}} +``` + +which says that the version is v4.1.0. + +### Does Lighthouse have pruning function like the execution client to save disk space? + +There is no pruning of Lighthouse database for now. However, since v4.2.0, a feature to only sync back to the weak subjectivity point (approximately 5 months) when syncing via a checkpoint sync was added. This will help to save disk space since the previous behaviour will sync back to the genesis by default.
+ +### Can I use a HDD for the freezer database and only have the hot db on SSD? + +Yes, you can do so by using the flag `--freezer-dir /path/to/freezer_db` in the beacon node. + + + + + + + + + + + + + -The `WARN Execution engine called failed` log is shown when the beacon node cannot reach the execution engine. When this warning occurs, it will be followed by a detailed message. A frequently encountered example of the error message is: -`error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec` -which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. There are a few reasons why this can occur: -1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. -1. The computer is overloaded. Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage. -1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync to the network and you may want to consider upgrading to a better SSD. -If the reason for the error message is caused by no. 1 above, you may want to look further. If the execution engine is out of sync suddenly, it is usually caused by ungraceful shutdown. The common causes for ungraceful shutdown are: -- Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. -- The service file is not stopped properly. 
To overcome this, make sure that the process is stop properly, e.g., during client updates. -- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. When this occurs, the log file will show `Main process exited, code=killed, status=9/KILL`. You can also run `sudo journalctl -a --since "18 hours ago" | grep -i "killed process` to confirm that the execution client has been killed due to oom. If you are using geth as the execution client, a short term solution is to reduce the resources used, for example: (1) reduce the cache by adding the flag `--cache 2048` (2) connect to less peers using the flag `--maxpeers 10`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer. -### How do I check or update my withdrawal credentials? -Withdrawals will be available after the Capella/Shanghai upgrades on 12th April 2023. To check that if you are eligible for withdrawals, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: -- `withdrawals enabled` means you will automatically receive withdrawals to the withdrawal address that you set. -- `withdrawals not enabled` means you will need to update your withdrawal credentials from `0x00` type to `0x01` type. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). - -For the case of `withdrawals not enabled`, you can update your withdrawal credentials **anytime**, and there is no deadline for that. The catch is that as long as you do not update your withdrawal credentials, your rewards in the beacon chain will continue to be locked in the beacon chain. Only after you update the withdrawal credentials, will the rewards be withdrawn to the withdrawal address. -### I am missing attestations. Why? 
-The first thing is to ensure both consensus and execution clients are synced with the network. If they are synced, there may still be some issues with the node setup itself that is causing the missed attestations. Check the setup to ensure that: -- the clock is synced -- the computer has sufficient resources and is not overloaded -- the internet is working well -- you have sufficient peers - -You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missing attestation should be a rare occurance. - -### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? - -In general it is unavoiadable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone else's performance. -### My beacon node is stuck at downloading historical block using checkpoing sync. What can I do? -Check the number of peers you are connected to. If you have low peers (less than 50), try to do port forwarding on the port 9000 TCP/UDP to increase peer count. \ No newline at end of file diff --git a/book/src/graffiti.md b/book/src/graffiti.md index 75c2a86dd5..302f8f9679 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -29,6 +29,8 @@ Lighthouse will first search for the graffiti corresponding to the public key of ### 2. Setting the graffiti in the `validator_definitions.yml` Users can set validator specific graffitis in `validator_definitions.yml` with the `graffiti` key. This option is recommended for static setups where the graffitis won't change on every new block proposal. 
+You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http). + Below is an example of the validator_definitions.yml with validator specific graffitis: ``` --- @@ -62,3 +64,25 @@ Usage: `lighthouse bn --graffiti fortytwo` > 3. If graffiti is not specified in `validator_definitions.yml`, load the graffiti passed in the `--graffiti` flag on the validator client. > 4. If the `--graffiti` flag on the validator client is not passed, load the graffiti passed in the `--graffiti` flag on the beacon node. > 4. If the `--graffiti` flag is not passed, load the default Lighthouse graffiti. + +### Set Graffiti via HTTP + +Use the [Lighthouse API](api-vc-endpoints.md) to set graffiti on a per-validator basis. This method updates the graffiti +both in memory and in the `validator_definitions.yml` file. The new graffiti will be used in the next block proposal +without requiring a validator client restart. + +Refer to [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey) for API specification. + +#### Example Command + +```bash +DATADIR=/var/lib/lighthouse +curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" \ +-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ +-H "Content-Type: application/json" \ +-d '{ + "graffiti": "Mr F was here" +}' | jq +``` + +A `null` response indicates that the request is successful. 
\ No newline at end of file diff --git a/book/src/imgs/full-withdrawal.png b/book/src/imgs/full-withdrawal.png index 6fa2db6a91..c16d835269 100644 Binary files a/book/src/imgs/full-withdrawal.png and b/book/src/imgs/full-withdrawal.png differ diff --git a/book/src/imgs/partial-withdrawal.png b/book/src/imgs/partial-withdrawal.png index 0bf90b91db..5d318b4e62 100644 Binary files a/book/src/imgs/partial-withdrawal.png and b/book/src/imgs/partial-withdrawal.png differ diff --git a/book/src/imgs/ui-dash-logs.png b/book/src/imgs/ui-dash-logs.png new file mode 100644 index 0000000000..3656ed5b20 Binary files /dev/null and b/book/src/imgs/ui-dash-logs.png differ diff --git a/book/src/imgs/ui-logs.png b/book/src/imgs/ui-logs.png new file mode 100644 index 0000000000..2a5c230ab1 Binary files /dev/null and b/book/src/imgs/ui-logs.png differ diff --git a/book/src/installation-source.md b/book/src/installation-source.md index b9c9df163d..1504b7ff0f 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -5,7 +5,7 @@ the instructions below, and then proceed to [Building Lighthouse](#build-lightho ## Dependencies -First, **install Rust** using [rustup](https://rustup.rs/): +First, **install Rust** using [rustup](https://rustup.rs/): ```bash curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh @@ -64,10 +64,10 @@ After this, you are ready to [build Lighthouse](#build-lighthouse). 1. Install [Git](https://git-scm.com/download/win). 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. - > Tips: + > Tips: > - Use PowerShell to install. In Windows, search for PowerShell and run as administrator. > - You must ensure `Get-ExecutionPolicy` is not Restricted. To test this, run `Get-ExecutionPolicy` in PowerShell. 
If it returns `restricted`, then run `Set-ExecutionPolicy AllSigned`, and then run - ```bash + ```bash Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) ``` > - To verify that Chocolatey is ready, run `choco` and it should return the version. @@ -159,13 +159,13 @@ Commonly used features include: * `gnosis`: support for the Gnosis Beacon Chain. * `portable`: support for legacy hardware. * `modern`: support for exclusively modern hardware. -* `slasher-mdbx`: support for the MDBX slasher backend. Enabled by default. -* `slasher-lmdb`: support for the LMDB slasher backend. +* `slasher-lmdb`: support for the LMDB slasher backend. Enabled by default. +* `slasher-mdbx`: support for the MDBX slasher backend. * `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. Not supported on Windows. * `spec-minimal`: support for the minimal preset (useful for testing). -Default features (e.g. `slasher-mdbx`) may be opted out of using the `--no-default-features` +Default features (e.g. `slasher-lmdb`) may be opted out of using the `--no-default-features` argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. E.g. diff --git a/book/src/key-management.md b/book/src/key-management.md index 084b1fbe4c..cebd84649d 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -3,7 +3,7 @@ [launchpad]: https://launchpad.ethereum.org/ > -> **Note: While Lighthouse is able to generate the validator keys and the deposit data file to submit to the deposit contract, we strongly recommend using the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) to create validators keys and the deposit data file. 
This is because the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) which has the option to assign a withdrawal address during the key generation process, while Lighthouse wallet will always generate keys with withdrawal credentials of type 0x00. This means that users who created keys using Lighthouse will have to update their withdrawal credentials in the future to enable withdrawals. In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, which cannot be uploaded to the [Staking launchpad][launchpad] that accepts only `*.json` file. This means that users have to directly interact with the deposit contract to be able to submit the deposit if they were to generate the files using Lighthouse.** +> **Note: While Lighthouse is able to generate the validator keys and the deposit data file to submit to the deposit contract, we strongly recommend using the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) to create validators keys and the deposit data file. This is because the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) has the option to assign a withdrawal address during the key generation process, while Lighthouse wallet will always generate keys with withdrawal credentials of type 0x00. This means that users who created keys using Lighthouse will have to update their withdrawal credentials in the future to enable withdrawals. In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, which cannot be uploaded to the [Staking launchpad][launchpad] that accepts only `*.json` file. This means that users have to directly interact with the deposit contract to be able to submit the deposit if they were to generate the files using Lighthouse.** Lighthouse uses a _hierarchical_ key management system for producing validator keys. 
It is hierarchical because each validator key can be _derived_ from a diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index ec9aeaaee8..acca0bbeb3 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -1,15 +1,13 @@ # Merge Migration -This document provides detail for users who want to run a Lighthouse node on post-merge Ethereum. - -> The merge occurred on mainnet in September 2022. +[The Merge](https://ethereum.org/en/roadmap/merge/) has occurred on mainnet on 15th September 2022. This document provides detail of what users need to do in the past (before The Merge) to run a Lighthouse node on a post-merge Ethereum network. This document now serves as a record of the milestone upgrade. ## Necessary Configuration There are two configuration changes required for a Lighthouse node to operate correctly throughout the merge: -1. You *must* run your own execution engine such as Geth or Nethermind alongside Lighthouse. +1. You *must* run your own execution engine such as Besu, Erigon, Geth or Nethermind alongside Lighthouse. You *must* update your `lighthouse bn` configuration to connect to the execution engine using new flags which are documented on this page in the [Connecting to an execution engine](#connecting-to-an-execution-engine) section. @@ -23,12 +21,19 @@ engine to a merge-ready version. ## When? -You must configure your node to be merge-ready before the Bellatrix fork occurs on the network -on which your node is operating. +All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**, **Gnosis**) have successfully undergone the Bellatrix fork and transitioned to a post-merge Network. Your node must have a merge-ready configuration to continue operating. Table below lists the date at which Bellatrix and The Merge occurred: -* **Gnosis**: the Bellatrix fork has not yet been scheduled. 
-* **Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has - already occurred. You must have a merge-ready configuration right now. +
+ +| Network | Bellatrix | The Merge | Remark | +|-------------------|--------------------------------------------|----|----| +| Ropsten | 2nd June 2022 | 8th June 2022 | Deprecated | +| Sepolia | 20th June 2022 | 6th July 2022 | | +| Goerli | 4th August 2022 | 10th August 2022 | Previously named `Prater` | +| Mainnet | 6th September 2022 | 15th September 2022 | | +| Gnosis | 30th November 2022 | 8th December 2022 | | + +
## Connecting to an execution engine @@ -42,7 +47,7 @@ present in post-merge blocks. Two new flags are used to configure this connectio If you set up an execution engine with `--execution-endpoint` then you *must* provide a JWT secret using `--execution-jwt`. This is a mandatory form of authentication that ensures that Lighthouse -has authority to control the execution engine. +has the authority to control the execution engine. > Tip: the --execution-jwt-secret-key flag can be used instead of --execution-jwt . > This is useful, for example, for users who wish to inject the value into a Docker container without @@ -89,7 +94,7 @@ lighthouse \ beacon_node \ --http \ --execution-endpoint http://localhost:8551 - --execution-jwt ~/.ethereum/geth/jwtsecret + --execution-jwt /path/to/jwtsecret ``` The changes here are: @@ -105,8 +110,7 @@ The changes here are: not be `8551`, see their documentation for details. 3. Add the `--execution-jwt` flag. - This is the path to a file containing a 32-byte secret for authenticating the BN with the - execution engine. In this example our execution engine is Geth, so we've chosen the default - location for Geth. Your execution engine might have a different path. It is critical that both + execution engine. It is critical that both the BN and execution engine reference a file with the same value, otherwise they'll fail to communicate. @@ -118,7 +122,7 @@ a deprecation warning will be logged and Lighthouse *may* remove these flags in ### The relationship between `--eth1-endpoints` and `--execution-endpoint` Pre-merge users will be familiar with the `--eth1-endpoints` flag. This provides a list of Ethereum -"eth1" nodes (e.g., Geth, Nethermind, etc). Each beacon node (BN) can have multiple eth1 endpoints +"eth1" nodes (Besu, Erigon, Geth or Nethermind). Each beacon node (BN) can have multiple eth1 endpoints and each eth1 endpoint can have many BNs connection (many-to-many relationship). 
The eth1 node provides a source of truth for the [deposit contract](https://ethereum.org/en/staking/deposit-contract/) and beacon chain proposers include this @@ -129,7 +133,7 @@ achieve this. To progress through the Bellatrix upgrade nodes will need a *new* connection to an "eth1" node; `--execution-endpoint`. This connection has a few different properties. Firstly, the term "eth1 node" has been deprecated and replaced with "execution engine". Whilst "eth1 node" and "execution -engine" still refer to the same projects (Geth, Nethermind, etc) the former refers to the pre-merge +engine" still refer to the same projects (Besu, Erigon, Geth or Nethermind), the former refers to the pre-merge versions and the latter refers to post-merge versions. Secondly, there is a strict one-to-one relationship between Lighthouse and the execution engine; only one Lighthouse node can connect to one execution engine. Thirdly, it is impossible to fully verify the post-merge chain without an @@ -139,7 +143,7 @@ impossible to reliably *propose* blocks without it. Since an execution engine is a hard requirement in the post-merge chain and the execution engine contains the transaction history of the Ethereum chain, there is no longer a need for the `--eth1-endpoints` flag for information about the deposit contract. The `--execution-endpoint` can -be used for all such queries. Therefore we can say that where `--execution-endpoint` is included +be used for all such queries. Therefore we can say that where `--execution-endpoint` is included, `--eth1-endpoints` should be omitted. ## FAQ diff --git a/book/src/partial-withdrawal.md b/book/src/partial-withdrawal.md index db722d729e..e5a0a97c6c 100644 --- a/book/src/partial-withdrawal.md +++ b/book/src/partial-withdrawal.md @@ -16,7 +16,7 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12 3. Do I have to do anything to get my rewards after I update the withdrawal credentials to type `0x01`? - No. 
The "validator sweep" occurs automatically and you can expect to receive the rewards every few days. + No. The "validator sweep" occurs automatically and you can expect to receive the rewards every *n* days, [more information here](./voluntary-exit.md#4-when-will-i-get-my-staked-fund-after-voluntary-exit-if-my-validator-is-of-type-0x01). Figure below summarizes partial withdrawals. diff --git a/book/src/redundancy.md b/book/src/redundancy.md index dcd2ecdea1..77cec32537 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -8,7 +8,7 @@ There are three places in Lighthouse where redundancy is notable: 1. ❌ NOT SUPPORTED: Using a redundant execution node in `lighthouse bn --execution-endpoint` 1. ☠️ BAD: Running redundant `lighthouse vc` instances with overlapping keypairs. -I mention (3) since it is unsafe and should not be confused with the other two +We mention (3) since it is unsafe and should not be confused with the other two uses of redundancy. **Running the same validator keypair in more than one validator client (Lighthouse, or otherwise) will eventually lead to slashing.** See [Slashing Protection](./slashing-protection.md) for more information. @@ -49,6 +49,7 @@ There are a few interesting properties about the list of `--beacon-nodes`: > provided (if it is desired). It will only be used as default if no `--beacon-nodes` flag is > provided at all. + ### Configuring a redundant Beacon Node In our previous example, we listed `http://192.168.1.1:5052` as a redundant @@ -56,10 +57,9 @@ node. Apart from having sufficient resources, the backup node should have the following flags: - `--http`: starts the HTTP API server. -- `--http-address 0.0.0.0`: this allows *any* external IP address to access the - HTTP server (a firewall should be configured to deny unauthorized access to port - `5052`). This is only required if your backup node is on a different host. -- `--execution-endpoint`: see [Merge Migration](./merge-migration.md). 
+- `--http-address local_IP`: where `local_IP` is the private IP address of the computer running the beacon node. This is only required if your backup beacon node is on a different host. + > Note: You could also use `--http-address 0.0.0.0`, but this allows *any* external IP address to access the HTTP server. As such, a firewall should be configured to deny unauthorized access to port `5052`. + - `--execution-endpoint`: see [Merge Migration](./merge-migration.md). - `--execution-jwt`: see [Merge Migration](./merge-migration.md). For example one could use the following command to provide a backup beacon node: @@ -67,7 +67,7 @@ For example one could use the following command to provide a backup beacon node: ```bash lighthouse bn \ --http \ - --http-address 0.0.0.0 \ + --http-address local_IP \ --execution-endpoint http://localhost:8551 \ --execution-jwt /secrets/jwt.hex ``` diff --git a/book/src/setup.md b/book/src/setup.md index a1febe4a02..ea3c5664ac 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -9,16 +9,18 @@ particularly useful for development but still a good way to ensure you have the base dependencies. The additional requirements for developers are: -- [`ganache v7`](https://github.com/trufflesuite/ganache). This is used to +- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil). This is used to simulate the execution chain during tests. You'll get failures during tests if you - don't have `ganache` available on your `PATH` or if ganache is older than v7. + don't have `anvil` available on your `PATH`. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by some dependencies. See [`Installation Guide`](./installation.md) for more info. - [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for the networking stack. - [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum, used by web3signer_tests. - +- [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). 
Also known as + `libpq-devel` on some systems. +- [`docker`](https://www.docker.com/). Some tests need docker installed and **running**. ## Using `make` Commands to run the test suite are available via the `Makefile` in the diff --git a/book/src/slasher.md b/book/src/slasher.md index 61dc4b327f..41bc3baf7e 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -8,10 +8,9 @@ extra income for your validators. However it is currently only recommended for e of the immaturity of the slasher UX and the extra resources required. ## Minimum System Requirements - * Quad-core CPU * 16 GB RAM -* 256 GB solid state storage (in addition to space for the beacon node DB) +* 256 GB solid state storage (in addition to the space requirement for the beacon node DB) ## How to Run @@ -28,7 +27,7 @@ messages are filtered for relevancy, and all relevant messages are checked for s to the slasher database. You **should** run with debug logs, so that you can see the slasher's internal machinations, and -provide logs to the devs should you encounter any bugs. +provide logs to the developers should you encounter any bugs. ## Configuration @@ -47,23 +46,49 @@ directory. * Flag: `--slasher-backend NAME` * Argument: one of `mdbx`, `lmdb` or `disabled` -* Default: `mdbx` +* Default: `lmdb` for new installs, `mdbx` if an MDBX database already exists -Since Lighthouse v2.6.0 it is possible to use one of several database backends with the slasher: +It is possible to use one of several database backends with the slasher: -- MDBX (default) -- LMDB +- LMDB (default) +- MDBX The advantage of MDBX is that it performs compaction, resulting in less disk usage over time. The -disadvantage is that upstream MDBX has removed support for Windows and macOS, so Lighthouse is stuck -on an older version. If bugs are found in our pinned version of MDBX it may be deprecated in future. +disadvantage is that upstream MDBX is unstable, so Lighthouse is pinned to a specific version. 
+If bugs are found in our pinned version of MDBX it may be deprecated in future. -LMDB does not have compaction but is more stable upstream than MDBX. It is not currently recommended -to use the LMDB backend on Windows. +LMDB does not have compaction but is more stable upstream than MDBX. If running with the LMDB +backend on Windows it is recommended to allow extra space due to this issue: +[sigp/lighthouse#2342](https://github.com/sigp/lighthouse/issues/2342). More backends may be added in future. -### Switching Backends +#### Backend Override + +The default backend was changed from MDBX to LMDB in Lighthouse v4.3.0. + +If an MDBX database is already found on disk, then Lighthouse will try to use it. This will result +in a log at start-up: + +``` +INFO Slasher backend overriden reason: database exists, configured_backend: lmdb, overriden_backend: mdbx +``` + +If the running Lighthouse binary doesn't have the MDBX backend enabled but an existing database is +found, then a warning will be logged and Lighthouse will use the LMDB backend and create a new database: + +``` +WARN Slasher backend override failed advice: delete old MDBX database or enable MDBX backend, path: /home/user/.lighthouse/mainnet/beacon/slasher_db/mdbx.dat +``` + +In this case you should either obtain a Lighthouse binary with the MDBX backend enabled, or delete +the files for the old backend. The pre-built Lighthouse binaries and Docker images have MDBX enabled, +or if you're [building from source](./installation-source.md) you can enable the `slasher-mdbx` feature. + +To delete the files, use the `path` from the `WARN` log, and then delete the `mdbx.dat` and +`mdbx.lck` files. + +#### Switching Backends If you change database backends and want to reclaim the space used by the old backend you can delete the following files from your `slasher_db` directory: @@ -97,7 +122,7 @@ Both database backends LMDB and MDBX place a hard limit on the size of the datab file. 
You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after initialization if the limit is reached. -By default the limit is set to accommodate the default history length and around 300K validators but +By default the limit is set to accommodate the default history length and around 600K validators (with about 30% headroom) but you can set it lower if running with a reduced history length. The space required scales approximately linearly in validator count and history length, i.e. if you halve either you can halve the space required. @@ -108,7 +133,7 @@ If you want an estimate of the database size you can use this formula: 4.56 GB * (N / 256) * (V / 250000) ``` -where `V` is the validator count and `N` is the history length. +where `N` is the history length and `V` is the validator count. You should set the maximum size higher than the estimate to allow room for growth in the validator count. diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index f3ece85062..44accbd143 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -103,6 +103,8 @@ client. } ``` +Command: + ```bash DATADIR=$HOME/.lighthouse/mainnet PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 @@ -115,11 +117,15 @@ curl -X POST \ http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq ``` +Note that an authorization header is required to interact with the API. This is specified with the header `-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)"` which read the API token to supply the authentication. Refer to [Authorization Header](./api-vc-auth-header.md) for more information. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. 
+ #### Successful Response (202) ```json null ``` +A `null` response indicates that the request is successful. + ### Querying the fee recipient The same path with a `GET` request can be used to query the fee recipient for a given public key at any time. @@ -131,6 +137,8 @@ The same path with a `GET` request can be used to query the fee recipient for a | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200, 404 | +Command: + ```bash DATADIR=$HOME/.lighthouse/mainnet PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 @@ -163,6 +171,8 @@ This is useful if you want the fee recipient to fall back to the validator clien | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 204, 404 | +Command: + ```bash DATADIR=$HOME/.lighthouse/mainnet PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md index 5b67b03b37..98f3041391 100644 --- a/book/src/ui-configuration.md +++ b/book/src/ui-configuration.md @@ -12,9 +12,20 @@ following configuration screen. This allows you to enter the address and ports of the associated Lighthouse Beacon node and Lighthouse Validator client. -> The Beacon Node must be run with the `--gui` flag set. To allow the browser -> to access the node beyond your local computer you also need to allow CORS in -> the http API. This can be done via `--http-allow-origin "*"`. +> The Beacon Node must be run with the `--gui` flag set. + +If you run Siren in the browser (by entering `localhost` in the browser), you will need to allow CORS in the HTTP API. This can be done by adding the flag `--http-allow-origin "*"` for both beacon node and validator client. If you would like to access Siren beyond the local computer, we recommend using an SSH tunnel. 
This requires a tunnel for 3 ports: `80` (assuming the port is unchanged as per the [installation guide](./ui-installation.md#docker-recommended), `5052` (for beacon node) and `5062` (for validator client). You can use the command below to perform SSH tunneling: +```bash +ssh -N -L 80:127.0.0.1:80 -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip +``` + +where `username` is the username of the server and `local_ip` is the local IP address of the server. Note that with the `-N` option in an SSH session, you will not be able to execute commands in the CLI to avoid confusion with ordinary shell sessions. The connection will appear to be "hung" upon a successful connection, but that is normal. Once you have successfully connected to the server via SSH tunneling, you should be able to access Siren by entering `localhost` in a web browser. + +You can also access Siren using the app downloaded in the [Siren release page](https://github.com/sigp/siren/releases). To access Siren beyond the local computer, you can use SSH tunneling for ports `5052` and `5062` using the command: + +```bash +ssh -N -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip +``` A green tick will appear once Siren is able to connect to both clients. You can specify different ports for each client by clicking on the advanced tab. @@ -33,7 +44,7 @@ The token is located in the default data directory of the validator client. The default path is `~/.lighthouse//validators/api-token.txt`. -The contents of this file for the desired valdiator client needs to be +The contents of this file for the desired validator client needs to be entered. ## Name diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md index 51aa9385a4..f4cbf1c40a 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui-faqs.md @@ -4,13 +4,23 @@ Yes, Siren requires Lighthouse v3.5.1 or higher to function properly. 
These releases can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. ## 2. Where can I find my API token? -The required Api token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./ui-configuration.md#api-token). +The required Api token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). ## 3. How do I fix the Node Network Errors? -If you recieve a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). +If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). ## 4. How do I change my Beacon or Validator address after logging in? -Once you have successfully arrived to the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configurtion` action button that will redirect you back to the configuration screen where you can make appropriate changes. +Once you have successfully arrived to the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. ## 5. Why doesn't my validator balance graph show any data? If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20min. + +## 4. 
Does Siren support reverse proxy or DNS named addresses? +Yes, if you need to access your beacon or validator from an address such as `https://merp-server:9909/eth2-vc` you should follow the following steps for configuration: +1. Toggle `https` as your protocol +2. Add your address as `merp-server/eth2-vc` +3. Add your Beacon and Validator ports as `9909` + +If you have configured it correctly you should see a green checkmark indicating Siren is now connected to your Validator Client and Beacon Node. + +If you have separate address setups for your Validator Client and Beacon Node respectively you should access the `Advance Settings` on the configuration and repeat the steps above for each address. diff --git a/book/src/ui-usage.md b/book/src/ui-usage.md index e88c4677a8..867a49a91f 100644 --- a/book/src/ui-usage.md +++ b/book/src/ui-usage.md @@ -10,7 +10,7 @@ Siren's dashboard view provides a summary of all performance and key validator m The account earnings component accumulates reward data from all registered validators providing a summation of total rewards earned while staking. Given current conversion rates, this component also converts your balance into your selected fiat currency. -Below in the earning section, you can also view your total earnings or click the adjacent buttons to view your estimated earnings given a specific timeframe based on current device and network conditions. +Below in the earning section, you can also view your total earnings or click the adjacent buttons to view your estimated earnings given a specific time frame based on current device and network conditions. ![](imgs/ui-account-earnings.png) @@ -33,6 +33,7 @@ By clicking on the chart component you can filter selected validators in the ren + ## Hardware Usage and Device Diagnostics The hardware usage component gathers information about the device the Beacon Node is currently running. It displays the Disk usage, CPU metrics and memory usage of the Beacon Node device. 
The device diagnostics component provides the sync status of the execution client and beacon node. @@ -42,6 +43,12 @@ The hardware usage component gathers information about the device the Beacon Nod +## Log Statistics + +The log statistics present an hourly combined rate of critical, warning, and error logs from the validator client and beacon node. This analysis enables informed decision-making, troubleshooting, and proactive maintenance for optimal system performance. + + + # Validator Management Siren's validator management view provides a detailed overview of all validators with options to deposit to and/or add new validators. Each validator table row displays the validator name, index, balance, rewards, status and all available actions per validator. @@ -59,3 +66,12 @@ Clicking the validator icon activates a detailed validator modal component. This Siren's settings view provides access to the application theme, version, name, device name and important external links. From the settings page users can also access the configuration screen to adjust any beacon or validator node parameters. ![](imgs/ui-settings.png) + + +# Validator and Beacon Logs + +The logs page provides users with the functionality to access and review recorded logs for both validators and beacons. Users can conveniently observe log severity, messages, timestamps, and any additional data associated with each log entry. The interface allows for seamless switching between validator and beacon log outputs, and incorporates useful features such as built-in text search and the ability to pause log feeds. + +Additionally, users can obtain log statistics, which are also available on the main dashboard, thereby facilitating a comprehensive overview of the system's log data. Please note that Siren is limited to storing and displaying only the previous 1000 log messages. This also means the text search is limited to the logs that are currently stored within Siren's limit. 
+ +![](imgs/ui-logs.png) \ No newline at end of file diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md index 6eaddcc7b0..7ce2868e9b 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator-doppelganger.md @@ -43,13 +43,12 @@ DP works by staying silent on the network for 2-3 epochs before starting to sign Staying silent and refusing to sign messages will cause the following: - 2-3 missed attestations, incurring penalties and missed rewards. -- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards. - Potentially missed rewards by missing a block proposal (if the validator is an elected block proposer, which is unlikely). The loss of rewards and penalties incurred due to the missed duties will be very small in -dollar-values. Generally, they will equate to around one US dollar (at August 2021 figures) or about -2% of the reward for one validator for one day. Since DP costs so little but can protect a user from +dollar-values. Neglecting block proposals, generally they will equate to around 0.00002 ETH (equivalent to USD 0.04 assuming ETH is trading at USD 2000), or less than +1% of the reward for one validator for one day. Since DP costs so little but can protect a user from slashing, many users will consider this a worthwhile trade-off. The 2-3 epochs of missed duties will be incurred whenever the VC is started (e.g., after an update diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index 0793af20db..ef81b2b751 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -8,6 +8,8 @@ These endpoints are not stable or included in the Ethereum consensus standard AP they are subject to change or removal without a change in major release version. +In order to apply these APIs, you need to have historical states information in the database of your node. 
This means adding the flag `--reconstruct-historic-states` in the beacon node or using the [/lighthouse/database/reconstruct API](./api-lighthouse.md#lighthousedatabasereconstruct). Once the state reconstruction process is completed, you can apply these APIs to any epoch. + ## Endpoints HTTP Path | Description | @@ -29,7 +31,7 @@ is not the case for attestations from the _previous_ epoch. ``` `epoch` query parameter | - | --------- values are calcuated here + | --------- values are calculated here | | v v Epoch: |---previous---|---current---|---next---| diff --git a/book/src/validator-web3signer.md b/book/src/validator-web3signer.md index 103f1ccb3c..00ef9a6b59 100644 --- a/book/src/validator-web3signer.md +++ b/book/src/validator-web3signer.md @@ -5,7 +5,7 @@ [Teku]: https://github.com/consensys/teku [Web3Signer] is a tool by Consensys which allows *remote signing*. Remote signing is when a -Validator Client (VC) out-sources the signing of messages to remote server (e.g., via HTTPS). This +Validator Client (VC) out-sources the signing of messages to a remote server (e.g., via HTTPS). This means that the VC does not hold the validator private keys. ## Warnings @@ -47,7 +47,7 @@ remote signer: client_identity_password: "password" ``` -When using this file, the Lighthouse VC will perform duties for the `0xa5566..` validator and defer +When using this file, the Lighthouse VC will perform duties for the `0xa5566..` validator and refer to the `https://my-remote-signer.com:1234` server to obtain any signatures. It will load a "self-signed" SSL certificate from `/home/paul/my-certificates/my-remote-signer.pem` (on the filesystem of the VC) to encrypt the communications between the VC and Web3Signer. It will use diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index d90395c07f..8d61c1770d 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -97,7 +97,26 @@ There are two types of withdrawal credentials, `0x00` and `0x01`. 
To check which - A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. - - A varying time of "validator sweep" that can take up to 5 days (at the time of writing with ~560,000 validators on the mainnet). The "validator sweep" is the process of skimming through all validators by index number for eligible withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set. + - A varying time of "validator sweep" that can take up to *n* days with *n* listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set. + +
+ +| Number of eligible validators | Ideal scenario *n* | Practical scenario *n* | +|:----------------:|:---------------------:|:----:| +| 300000 | 2.60 | 2.63 | +| 400000 | 3.47 | 3.51 | +| 500000 | 4.34 | 4.38 | +| 600000 | 5.21 | 5.26 | +| 700000 | 6.08 | 6.14 | +| 800000 | 6.94 | 7.01 | +| 900000 | 7.81 | 7.89 | +| 1000000 | 8.68 | 8.77 | + +
+ +> Note: Ideal scenario assumes no block proposals are missed. This means a total of withdrawals of 7200 blocks/day * 16 withdrawals/block = 115200 withdrawals/day. Practical scenario assumes 1% of blocks are missed per day. As an example, if there are 700000 eligible validators, one would expect a waiting time of slightly more than 6 days. + + The total time taken is the summation of the above 3 waiting periods. After these waiting periods, you will receive the staked funds in your withdrawal address. diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 7eb37a9b94..c3dd3bd193 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "4.1.0" +version = "4.3.0" authors = ["Sigma Prime "] edition = "2021" diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index c3d7ac48a9..d7ea5ab0b3 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -13,13 +13,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .settings(&[clap::AppSettings::ColoredHelp]) .arg( Arg::with_name("enr-address") - .value_name("IP-ADDRESS") - .help("The external IP address/ DNS address to broadcast to other peers on how to reach this node. \ - If a DNS address is provided, the enr-address is set to the IP address it resolves to and \ - does not auto-update based on PONG responses in discovery.") + .long("enr-address") + .value_name("ADDRESS") + .help("The IP address/ DNS address to broadcast to other peers on how to reach \ + this node. If a DNS address is provided, the enr-address is set to the IP \ + address it resolves to and does not auto-update based on PONG responses in \ + discovery. Set this only if you are sure other nodes can connect to your \ + local node on this address. This will update the `ip4` or `ip6` ENR fields \ + accordingly. 
To update both, set this flag twice with the different values.") + .multiple(true) + .max_values(2) .required(true) - .takes_value(true) .conflicts_with("network-dir") + .takes_value(true), ) .arg( Arg::with_name("port") @@ -29,11 +35,29 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("9000") .takes_value(true) ) + .arg( + Arg::with_name("port6") + .long("port6") + .value_name("PORT") + .help("The UDP port to listen on over IpV6 when listening over both Ipv4 and \ + Ipv6. Defaults to 9090 when required.") + .default_value("9090") + .takes_value(true), + ) .arg( Arg::with_name("listen-address") .long("listen-address") .value_name("ADDRESS") - .help("The address the bootnode will listen for UDP connections.") + .help("The address the bootnode will listen for UDP communications. To listen \ + over IpV4 and IpV6 set this flag twice with the different values.\n\ + Examples:\n\ + - --listen-address '0.0.0.0' will listen over Ipv4.\n\ + - --listen-address '::' will listen over Ipv6.\n\ + - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ + Ipv4 and Ipv6. The order of the given addresses is not relevant. However, \ + multiple Ipv4, or multiple Ipv6 addresses will not be accepted.") + .multiple(true) + .max_values(2) .default_value("0.0.0.0") .takes_value(true) ) @@ -59,6 +83,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("PORT") .help("The UDP6 port of the local ENR. 
Set this only if you are sure other nodes \ can connect to your local node on this port over IpV6.") + .conflicts_with("network-dir") .takes_value(true), ) .arg( @@ -77,7 +102,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("network-dir") .value_name("NETWORK_DIR") .long("network-dir") - .help("The directory which contains the enr and it's assoicated private key") + .help("The directory which contains the enr and it's associated private key") .takes_value(true) ) } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index d3ee58a907..c4e36022a8 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -2,7 +2,6 @@ use beacon_node::{get_data_dir, set_network_config}; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; use lighthouse_network::discovery::create_enr_builder_from_config; -use lighthouse_network::discv5::IpMode; use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr}; use lighthouse_network::{ discovery::{load_enr_from_disk, use_or_load_enr}, @@ -10,13 +9,12 @@ use lighthouse_network::{ }; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; -use std::net::SocketAddr; +use std::net::{SocketAddrV4, SocketAddrV6}; use std::{marker::PhantomData, path::PathBuf}; use types::EthSpec; /// A set of configuration parameters for the bootnode, established from CLI arguments. pub struct BootNodeConfig { - pub listen_socket: SocketAddr, // TODO: Generalise to multiaddr pub boot_nodes: Vec, pub local_enr: Enr, @@ -81,31 +79,6 @@ impl BootNodeConfig { network_config.discv5_config.enr_update = false; } - // the address to listen on - let listen_socket = match network_config.listen_addrs().clone() { - lighthouse_network::ListenAddress::V4(v4_addr) => { - // Set explicitly as ipv4 otherwise - network_config.discv5_config.ip_mode = IpMode::Ip4; - v4_addr.udp_socket_addr() - } - lighthouse_network::ListenAddress::V6(v6_addr) => { - // create ipv6 sockets and enable ipv4 mapped addresses. 
- network_config.discv5_config.ip_mode = IpMode::Ip6 { - enable_mapped_addresses: false, - }; - - v6_addr.udp_socket_addr() - } - lighthouse_network::ListenAddress::DualStack(_v4_addr, v6_addr) => { - // create ipv6 sockets and enable ipv4 mapped addresses. - network_config.discv5_config.ip_mode = IpMode::Ip6 { - enable_mapped_addresses: true, - }; - - v6_addr.udp_socket_addr() - } - }; - let private_key = load_private_key(&network_config, &logger); let local_key = CombinedKey::from_libp2p(&private_key)?; @@ -143,7 +116,7 @@ impl BootNodeConfig { let mut builder = create_enr_builder_from_config(&network_config, enable_tcp); // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { - builder.add_value("eth2", enr_fork_bytes.as_slice()); + builder.add_value("eth2", &enr_fork_bytes); } builder .build(&local_key) @@ -155,7 +128,6 @@ impl BootNodeConfig { }; Ok(BootNodeConfig { - listen_socket, boot_nodes, local_enr, local_key, @@ -170,7 +142,8 @@ impl BootNodeConfig { /// Its fields are a subset of the fields of `BootNodeConfig`, some of them are copied from `Discv5Config`. 
#[derive(Serialize, Deserialize)] pub struct BootNodeConfigSerialization { - pub listen_socket: SocketAddr, + pub ipv4_listen_socket: Option, + pub ipv6_listen_socket: Option, // TODO: Generalise to multiaddr pub boot_nodes: Vec, pub local_enr: Enr, @@ -183,7 +156,6 @@ impl BootNodeConfigSerialization { /// relevant fields of `config` pub fn from_config_ref(config: &BootNodeConfig) -> Self { let BootNodeConfig { - listen_socket, boot_nodes, local_enr, local_key: _, @@ -191,8 +163,27 @@ impl BootNodeConfigSerialization { phantom: _, } = config; + let (ipv4_listen_socket, ipv6_listen_socket) = match discv5_config.listen_config { + lighthouse_network::discv5::ListenConfig::Ipv4 { ip, port } => { + (Some(SocketAddrV4::new(ip, port)), None) + } + lighthouse_network::discv5::ListenConfig::Ipv6 { ip, port } => { + (None, Some(SocketAddrV6::new(ip, port, 0, 0))) + } + lighthouse_network::discv5::ListenConfig::DualStack { + ipv4, + ipv4_port, + ipv6, + ipv6_port, + } => ( + Some(SocketAddrV4::new(ipv4, ipv4_port)), + Some(SocketAddrV6::new(ipv6, ipv6_port, 0, 0)), + ), + }; + BootNodeConfigSerialization { - listen_socket: *listen_socket, + ipv4_listen_socket, + ipv6_listen_socket, boot_nodes: boot_nodes.clone(), local_enr: local_enr.clone(), disable_packet_filter: !discv5_config.enable_packet_filter, diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 3f5419c2c6..3823b28726 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -10,7 +10,6 @@ use types::EthSpec; pub async fn run(config: BootNodeConfig, log: slog::Logger) { let BootNodeConfig { - listen_socket, boot_nodes, local_enr, local_key, @@ -31,7 +30,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { let pretty_v6_socket = enr_v6_socket.as_ref().map(|addr| addr.to_string()); info!( log, "Configuration parameters"; - "listening_address" => %listen_socket, + "listening_address" => ?discv5_config.listen_config, "advertised_v4_address" => ?pretty_v4_socket, 
"advertised_v6_address" => ?pretty_v6_socket, "eth2" => eth2_field @@ -41,6 +40,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { // build the contactable multiaddr list, adding the p2p protocol info!(log, "Contact information"; "enr" => local_enr.to_base64()); + info!(log, "Enr details"; "enr" => ?local_enr); info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p()); // construct the discv5 server @@ -64,7 +64,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { } // start the server - if let Err(e) = discv5.start(listen_socket).await { + if let Err(e) = discv5.start().await { slog::crit!(log, "Could not start discv5 server"; "error" => %e); return; } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 2c5e7060b2..d8e1a375fd 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -10,7 +10,7 @@ edition = "2021" serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" types = { path = "../../consensus/types" } -reqwest = { version = "0.11.0", features = ["json","stream"] } +reqwest = { version = "0.11.0", features = ["json", "stream"] } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } proto_array = { path = "../../consensus/proto_array", optional = true } ethereum_serde_utils = "0.5.0" @@ -26,7 +26,12 @@ futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } slashing_protection = { path = "../../validator_client/slashing_protection", optional = true } +mediatype = "0.19.13" mime = "0.3.16" +pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } + +[dev-dependencies] +tokio = { version = "1.14.0", features = ["full"] } [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.2.2", optional = true } @@ -34,4 +39,10 @@ procinfo = { version = "0.4.2", optional = true } [features] default = ["lighthouse"] -lighthouse = ["proto_array", "psutil", "procinfo", "store", 
"slashing_protection"] +lighthouse = [ + "proto_array", + "psutil", + "procinfo", + "store", + "slashing_protection", +] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index e03cc2e9b0..e34916beba 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -19,6 +19,7 @@ use self::types::{Error as ResponseError, *}; use futures::Stream; use futures_util::StreamExt; use lighthouse_network::PeerId; +use pretty_reqwest_error::PrettyReqwestError; pub use reqwest; use reqwest::{IntoUrl, RequestBuilder, Response}; pub use reqwest::{StatusCode, Url}; @@ -39,7 +40,7 @@ pub const CONSENSUS_VERSION_HEADER: &str = "Eth-Consensus-Version"; #[derive(Debug)] pub enum Error { /// The `reqwest` client raised an error. - Reqwest(reqwest::Error), + HttpClient(PrettyReqwestError), /// The server returned an error message where the body was able to be parsed. ServerMessage(ErrorMessage), /// The server returned an error message with an array of errors. @@ -70,7 +71,7 @@ pub enum Error { impl From for Error { fn from(error: reqwest::Error) -> Self { - Error::Reqwest(error) + Error::HttpClient(error.into()) } } @@ -78,7 +79,7 @@ impl Error { /// If the error has a HTTP status code, return it. pub fn status(&self) -> Option { match self { - Error::Reqwest(error) => error.status(), + Error::HttpClient(error) => error.inner().status(), Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::StatusCode(status) => Some(*status), @@ -218,7 +219,11 @@ impl BeaconNodeHttpClient { /// Perform a HTTP GET request, returning `None` on a 404 error. async fn get_opt(&self, url: U) -> Result, Error> { - match self.get_response(url, |b| b).await.optional()? { + match self + .get_response(url, |b| b.accept(Accept::Json)) + .await + .optional()? 
+ { Some(response) => Ok(Some(response.json().await?)), None => Ok(None), } @@ -231,7 +236,7 @@ impl BeaconNodeHttpClient { timeout: Duration, ) -> Result, Error> { let opt_response = self - .get_response(url, |b| b.timeout(timeout)) + .get_response(url, |b| b.timeout(timeout).accept(Accept::Json)) .await .optional()?; match opt_response { @@ -274,7 +279,7 @@ impl BeaconNodeHttpClient { .await? .json() .await - .map_err(Error::Reqwest) + .map_err(Into::into) } /// Perform a HTTP POST request with a custom timeout. @@ -299,7 +304,7 @@ impl BeaconNodeHttpClient { .await? .json() .await - .map_err(Error::Reqwest) + .map_err(Error::from) } /// Generic POST function supporting arbitrary responses and timeouts. @@ -317,6 +322,26 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } + /// Generic POST function supporting arbitrary responses and timeouts. + async fn post_generic_with_consensus_version( + &self, + url: U, + body: &T, + timeout: Option, + fork: ForkName, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder + .header(CONSENSUS_VERSION_HEADER, fork.to_string()) + .json(body) + .send() + .await?; + ok_or_error(response).await + } + /// `GET beacon/genesis` /// /// ## Errors @@ -649,6 +674,76 @@ impl BeaconNodeHttpClient { Ok(()) } + pub fn post_beacon_blocks_v2_path( + &self, + validation_level: Option, + ) -> Result { + let mut path = self.eth_path(V2)?; + path.path_segments_mut() + .map_err(|_| Error::InvalidUrl(self.server.clone()))? + .extend(&["beacon", "blocks"]); + + path.set_query( + validation_level + .map(|v| format!("broadcast_validation={}", v)) + .as_deref(), + ); + + Ok(path) + } + + pub fn post_beacon_blinded_blocks_v2_path( + &self, + validation_level: Option, + ) -> Result { + let mut path = self.eth_path(V2)?; + path.path_segments_mut() + .map_err(|_| Error::InvalidUrl(self.server.clone()))? 
+ .extend(&["beacon", "blinded_blocks"]); + + path.set_query( + validation_level + .map(|v| format!("broadcast_validation={}", v)) + .as_deref(), + ); + + Ok(path) + } + + /// `POST v2/beacon/blocks` + pub async fn post_beacon_blocks_v2>( + &self, + block: &SignedBeaconBlock, + validation_level: Option, + ) -> Result<(), Error> { + self.post_generic_with_consensus_version( + self.post_beacon_blocks_v2_path(validation_level)?, + block, + Some(self.timeouts.proposal), + block.message().body().fork_name(), + ) + .await?; + + Ok(()) + } + + /// `POST v2/beacon/blinded_blocks` + pub async fn post_beacon_blinded_blocks_v2( + &self, + block: &SignedBlindedBeaconBlock, + validation_level: Option, + ) -> Result<(), Error> { + self.post_generic_with_consensus_version( + self.post_beacon_blinded_blocks_v2_path(validation_level)?, + block, + Some(self.timeouts.proposal), + block.message().body().fork_name(), + ) + .await?; + + Ok(()) + } + /// Path for `v2/beacon/blocks` pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V2)?; @@ -982,16 +1077,14 @@ impl BeaconNodeHttpClient { /// `GET beacon/deposit_snapshot` pub async fn get_deposit_snapshot(&self) -> Result, Error> { - use ssz::Decode; let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("beacon") .push("deposit_snapshot"); - self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_deposit_snapshot) - .await? 
- .map(|bytes| DepositTreeSnapshot::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz)) - .transpose() + self.get_opt_with_timeout::, _>(path, self.timeouts.get_deposit_snapshot) + .await + .map(|opt| opt.map(|r| r.data)) } /// `POST beacon/rewards/sync_committee` @@ -1643,7 +1736,7 @@ impl BeaconNodeHttpClient { .bytes_stream() .map(|next| match next { Ok(bytes) => EventKind::from_sse_bytes(bytes.as_ref()), - Err(e) => Err(Error::Reqwest(e)), + Err(e) => Err(Error::HttpClient(e.into())), })) } diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index bb933dbe12..1b4bcc0e39 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -364,12 +364,12 @@ pub struct DatabaseInfo { impl BeaconNodeHttpClient { /// Perform a HTTP GET request, returning `None` on a 404 error. async fn get_bytes_opt(&self, url: U) -> Result>, Error> { - let response = self.client.get(url).send().await.map_err(Error::Reqwest)?; + let response = self.client.get(url).send().await.map_err(Error::from)?; match ok_or_error(response).await { Ok(resp) => Ok(Some( resp.bytes() .await - .map_err(Error::Reqwest)? + .map_err(Error::from)? .into_iter() .collect::>(), )), diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index b2c8e03cd6..7bf4cf5b19 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -16,6 +16,7 @@ use std::path::Path; pub use reqwest; pub use reqwest::{Response, StatusCode, Url}; +use types::graffiti::GraffitiString; /// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a /// Lighthouse Validator Client HTTP server (`validator_client/src/http_api`). @@ -169,7 +170,7 @@ impl ValidatorClientHttpClient { .map_err(|_| Error::InvalidSignatureHeader)? 
.to_string(); - let body = response.bytes().await.map_err(Error::Reqwest)?; + let body = response.bytes().await.map_err(Error::from)?; let message = Message::parse_slice(digest(&SHA256, &body).as_ref()).expect("sha256 is 32 bytes"); @@ -221,7 +222,7 @@ impl ValidatorClientHttpClient { .headers(self.headers()?) .send() .await - .map_err(Error::Reqwest)?; + .map_err(Error::from)?; ok_or_error(response).await } @@ -235,7 +236,7 @@ impl ValidatorClientHttpClient { .await? .json() .await - .map_err(Error::Reqwest) + .map_err(Error::from) } /// Perform a HTTP GET request, returning `None` on a 404 error. @@ -265,7 +266,7 @@ impl ValidatorClientHttpClient { .json(body) .send() .await - .map_err(Error::Reqwest)?; + .map_err(Error::from)?; ok_or_error(response).await } @@ -296,7 +297,7 @@ impl ValidatorClientHttpClient { .json(body) .send() .await - .map_err(Error::Reqwest)?; + .map_err(Error::from)?; let response = ok_or_error(response).await?; self.signed_body(response).await?; Ok(()) @@ -315,7 +316,7 @@ impl ValidatorClientHttpClient { .json(body) .send() .await - .map_err(Error::Reqwest)?; + .map_err(Error::from)?; ok_or_error(response).await } @@ -467,6 +468,7 @@ impl ValidatorClientHttpClient { enabled: Option, gas_limit: Option, builder_proposals: Option, + graffiti: Option, ) -> Result<(), Error> { let mut path = self.server.full.clone(); @@ -482,6 +484,7 @@ impl ValidatorClientHttpClient { enabled, gas_limit, builder_proposals, + graffiti, }, ) .await diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 5b2b3d889d..f1a91b4ef1 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -83,6 +83,9 @@ pub struct ValidatorPatchRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub graffiti: Option, } #[derive(Clone, PartialEq, Serialize, Deserialize)] 
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index f58dc8e2a4..5f2e1ada7b 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -3,11 +3,10 @@ use crate::Error as ServerError; use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStatus}; -use mime::{Mime, APPLICATION, JSON, OCTET_STREAM, STAR}; +use mediatype::{names, MediaType, MediaTypeList}; use serde::{Deserialize, Serialize}; -use std::cmp::Reverse; use std::convert::TryFrom; -use std::fmt; +use std::fmt::{self, Display}; use std::str::{from_utf8, FromStr}; use std::time::Duration; pub use types::*; @@ -577,6 +576,7 @@ pub struct VersionData { pub struct SyncingData { pub is_syncing: bool, pub is_optimistic: Option, + pub el_offline: Option, pub head_slot: Slot, pub sync_distance: Slot, } @@ -1171,35 +1171,58 @@ impl FromStr for Accept { type Err = String; fn from_str(s: &str) -> Result { - let mut mimes = parse_accept(s)?; + let media_type_list = MediaTypeList::new(s); // [q-factor weighting]: https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.2 // find the highest q-factor supported accept type - mimes.sort_by_key(|m| { - Reverse(m.get_param("q").map_or(1000_u16, |n| { - (n.as_ref().parse::().unwrap_or(0_f32) * 1000_f32) as u16 - })) - }); - mimes - .into_iter() - .find_map(|m| match (m.type_(), m.subtype()) { - (APPLICATION, OCTET_STREAM) => Some(Accept::Ssz), - (APPLICATION, JSON) => Some(Accept::Json), - (STAR, STAR) => Some(Accept::Any), - _ => None, - }) - .ok_or_else(|| "accept header is not supported".to_string()) - } -} + let mut highest_q = 0_u16; + let mut accept_type = None; -fn parse_accept(accept: &str) -> Result, String> { - accept - .split(',') - .map(|part| { - part.parse() - .map_err(|e| format!("error parsing Accept header: {}", e)) - }) - .collect() + const APPLICATION: &str = names::APPLICATION.as_str(); + const OCTET_STREAM: &str = names::OCTET_STREAM.as_str(); + const JSON: &str = names::JSON.as_str(); + const STAR: 
&str = names::_STAR.as_str(); + const Q: &str = names::Q.as_str(); + + media_type_list.into_iter().for_each(|item| { + if let Ok(MediaType { + ty, + subty, + suffix: _, + params, + }) = item + { + let q_accept = match (ty.as_str(), subty.as_str()) { + (APPLICATION, OCTET_STREAM) => Some(Accept::Ssz), + (APPLICATION, JSON) => Some(Accept::Json), + (STAR, STAR) => Some(Accept::Any), + _ => None, + } + .map(|item_accept_type| { + let q_val = params + .iter() + .find_map(|(n, v)| match n.as_str() { + Q => { + Some((v.as_str().parse::().unwrap_or(0_f32) * 1000_f32) as u16) + } + _ => None, + }) + .or(Some(1000_u16)); + + (q_val.unwrap(), item_accept_type) + }); + + match q_accept { + Some((q, accept)) if q > highest_q => { + highest_q = q; + accept_type = Some(accept); + } + _ => (), + } + } + }); + accept_type.ok_or_else(|| "accept header is not supported".to_string()) + } } #[derive(Debug, Serialize, Deserialize)] @@ -1237,6 +1260,50 @@ pub struct ForkChoiceNode { pub execution_block_hash: Option, } +#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum BroadcastValidation { + Gossip, + Consensus, + ConsensusAndEquivocation, +} + +impl Default for BroadcastValidation { + fn default() -> Self { + Self::Gossip + } +} + +impl Display for BroadcastValidation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Gossip => write!(f, "gossip"), + Self::Consensus => write!(f, "consensus"), + Self::ConsensusAndEquivocation => write!(f, "consensus_and_equivocation"), + } + } +} + +impl FromStr for BroadcastValidation { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + match s { + "gossip" => Ok(Self::Gossip), + "consensus" => Ok(Self::Consensus), + "consensus_and_equivocation" => Ok(Self::ConsensusAndEquivocation), + _ => Err("Invalid broadcast validation level"), + } + } +} + +#[derive(Default, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub struct 
BroadcastValidationQuery { + #[serde(default)] + pub broadcast_validation: BroadcastValidation, +} + #[cfg(test)] mod tests { use super::*; @@ -1267,6 +1334,11 @@ mod tests { assert_eq!( Accept::from_str("text/plain"), Err("accept header is not supported".to_string()) - ) + ); + + assert_eq!( + Accept::from_str("application/json;message=\"Hello, world!\";q=0.3,*/*;q=0.6").unwrap(), + Accept::Any + ); } } diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index f8382c95d3..296d43b1a2 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -18,4 +18,4 @@ serde_yaml = "0.8.13" types = { path = "../../consensus/types"} ethereum_ssz = "0.5.0" eth2_config = { path = "../eth2_config"} -discv5 = "0.2.2" +discv5 = "0.3.0" \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index ca1d1e88a8..0fdc159ec2 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -38,7 +38,7 @@ BELLATRIX_FORK_VERSION: 0x02000064 BELLATRIX_FORK_EPOCH: 385536 # Capella CAPELLA_FORK_VERSION: 0x03000064 -CAPELLA_FORK_EPOCH: 18446744073709551615 +CAPELLA_FORK_EPOCH: 648704 # Sharding SHARDING_FORK_VERSION: 0x03000064 SHARDING_FORK_EPOCH: 18446744073709551615 @@ -86,3 +86,7 @@ PROPOSER_SCORE_BOOST: 40 DEPOSIT_CHAIN_ID: 100 DEPOSIT_NETWORK_ID: 100 DEPOSIT_CONTRACT_ADDRESS: 0x0B98057eA310F4d31F2a452B414647007d1645d9 + +# Network +# --------------------------------------------------------------- +SUBNETS_PER_NODE: 4 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml index 196629cb8d..428a082cc0 100644 --- 
a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml @@ -1,6 +1,8 @@ # Lighthouse Team (Sigma Prime) -- enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo -- enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo +- enr:-Le4QPUXJS2BTORXxyx2Ia-9ae4YqA_JWX3ssj4E_J-3z1A-HmFGrU8BpvpqhNabayXeOZ2Nq_sbeDgtzMJpLLnXFgAChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISsaa0Zg2lwNpAkAIkHAAAAAPA8kv_-awoTiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMohHVkcDaCI4I +- enr:-Le4QLHZDSvkLfqgEo8IWGG96h6mxwe_PsggC20CL3neLBjfXLGAQFOPSltZ7oP6ol54OvaNqO02Rnvb8YmDR274uq8ChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLosQxg2lwNpAqAX4AAAAAAPA8kv_-ax65iXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMohHVkcDaCI4I +- enr:-Le4QH6LQrusDbAHPjU_HcKOuMeXfdEB5NJyXgHWFadfHgiySqeDyusQMvfphdYWOzuSZO9Uq2AMRJR5O4ip7OvVma8BhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY9ncg2lwNpAkAh8AgQIBAAAAAAAAAAmXiXNlY3AyNTZrMaECDYCZTZEksF-kmgPholqgVt8IXr-8L7Nu7YrZ7HUpgxmDdWRwgiMohHVkcDaCI4I +- enr:-Le4QIqLuWybHNONr933Lk0dcMmAB5WgvGKRyDihy1wHDIVlNuuztX62W51voT4I8qD34GcTEOTmag1bcdZ_8aaT4NUBhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY04ng2lwNpAkAh8AgAIBAAAAAAAAAA-fiXNlY3AyNTZrMaEDscnRV6n1m-D9ID5UsURk0jsoKNXt1TIrj8uKOGW6iluDdWRwgiMohHVkcDaCI4I # EF Team - enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg - 
enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg @@ -15,4 +17,4 @@ - enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg # Nimbus team - enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM -- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM +- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 0bbf873a3f..7b26b30a6c 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -86,3 +86,7 @@ PROPOSER_SCORE_BOOST: 40 DEPOSIT_CHAIN_ID: 1 DEPOSIT_NETWORK_ID: 1 DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + +# Network +# --------------------------------------------------------------- +SUBNETS_PER_NODE: 2 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml 
b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index 69d65ca8fc..63b3d45db9 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -86,3 +86,7 @@ DEPOSIT_CHAIN_ID: 5 DEPOSIT_NETWORK_ID: 5 # Prater test deposit contract on Goerli Testnet DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b + +# Network +# --------------------------------------------------------------- +SUBNETS_PER_NODE: 2 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml index abb3b1250e..f88fbc765a 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml @@ -1 +1,5 @@ +# EF Team - enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk +- enr:-KG4QE5OIg5ThTjkzrlVF32WT_-XT14WeJtIz2zoTqLLjQhYAmJlnk4ItSoH41_2x0RX0wTFIe5GgjRzU2u7Q1fN4vADhGV0aDKQqP7o7pAAAHAyAAAAAAAAAIJpZIJ2NIJpcISlFsStiXNlY3AyNTZrMaEC-Rrd_bBZwhKpXzFCrStKp1q_HmGOewxY3KwM8ofAj_ODdGNwgiMog3VkcIIjKA +# Teku team (Consensys) +- enr:-Ly4QFoZTWR8ulxGVsWydTNGdwEESueIdj-wB6UmmjUcm-AOPxnQi7wprzwcdo7-1jBW_JxELlUKJdJES8TDsbl1EdNlh2F0dG5ldHOI__78_v2bsV-EZXRoMpA2-lATkAAAcf__________gmlkgnY0gmlwhBLYJjGJc2VjcDI1NmsxoQI0gujXac9rMAb48NtMqtSTyHIeNYlpjkbYpWJw46PmYYhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 2946572899..8489f085f4 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ 
b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -74,3 +74,7 @@ PROPOSER_SCORE_BOOST: 40 DEPOSIT_CHAIN_ID: 11155111 DEPOSIT_NETWORK_ID: 11155111 DEPOSIT_CONTRACT_ADDRESS: 0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D + +# Network +# --------------------------------------------------------------- +SUBNETS_PER_NODE: 2 \ No newline at end of file diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index d30f45ca29..e874432fbc 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.1.0-", - fallback = "Lighthouse/v4.1.0" + prefix = "Lighthouse/v4.3.0-", + fallback = "Lighthouse/v4.3.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index e56a1a2358..b6179d9e78 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -10,6 +10,13 @@ test_logger = [] # Print log output to stderr when running tests instead of drop [dependencies] slog = "2.5.2" slog-term = "2.6.0" +tokio = { version = "1.26.0", features = ["sync"] } lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" sloggers = { version = "2.1.1", features = ["json"] } +slog-async = "2.7.0" +take_mut = "0.2.2" +parking_lot = "0.12.1" +serde = "1.0.153" +serde_json = "1.0.94" +chrono = "0.4.23" diff --git a/common/logging/src/async_record.rs b/common/logging/src/async_record.rs new file mode 100644 index 0000000000..6f998c6191 --- /dev/null +++ b/common/logging/src/async_record.rs @@ -0,0 +1,309 @@ +//! An object that can be used to pass through a channel and be cloned. It can therefore be used +//! via the broadcast channel. 
+ +use parking_lot::Mutex; +use serde::ser::SerializeMap; +use serde::serde_if_integer128; +use serde::Serialize; +use slog::{BorrowedKV, Key, Level, OwnedKVList, Record, RecordStatic, Serializer, SingleKV, KV}; +use std::cell::RefCell; +use std::fmt; +use std::fmt::Write; +use std::sync::Arc; +use take_mut::take; + +thread_local! { + static TL_BUF: RefCell = RefCell::new(String::with_capacity(128)) +} + +/// Serialized record. +#[derive(Clone)] +pub struct AsyncRecord { + msg: String, + level: Level, + location: Box, + tag: String, + logger_values: OwnedKVList, + kv: Arc>, +} + +impl AsyncRecord { + /// Serializes a `Record` and an `OwnedKVList`. + pub fn from(record: &Record, logger_values: &OwnedKVList) -> Self { + let mut ser = ToSendSerializer::new(); + record + .kv() + .serialize(record, &mut ser) + .expect("`ToSendSerializer` can't fail"); + + AsyncRecord { + msg: fmt::format(*record.msg()), + level: record.level(), + location: Box::new(*record.location()), + tag: String::from(record.tag()), + logger_values: logger_values.clone(), + kv: Arc::new(Mutex::new(ser.finish())), + } + } + + pub fn to_json_string(&self) -> Result { + serde_json::to_string(&self).map_err(|e| format!("{:?}", e)) + } +} + +pub struct ToSendSerializer { + kv: Box, +} + +impl ToSendSerializer { + fn new() -> Self { + ToSendSerializer { kv: Box::new(()) } + } + + fn finish(self) -> Box { + self.kv + } +} + +impl Serializer for ToSendSerializer { + fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_unit(&mut self, key: Key) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, ())))); + Ok(()) + } + fn emit_none(&mut self, key: Key) -> slog::Result { + let val: Option<()> = None; + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_char(&mut self, key: Key, val: char) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, 
val)))); + Ok(()) + } + fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + #[cfg(integer128)] + fn emit_u128(&mut self, key: Key, val: u128) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + #[cfg(integer128)] + fn emit_i128(&mut self, key: Key, val: i128) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, 
val)))); + Ok(()) + } + fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { + let val = val.to_owned(); + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result { + let val = fmt::format(*val); + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } +} + +impl Serialize for AsyncRecord { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + // Get the current time + let dt = chrono::Local::now().format("%b %e %T").to_string(); + + let rs = RecordStatic { + location: &self.location, + level: self.level, + tag: &self.tag, + }; + let mut map_serializer = SerdeSerializer::new(serializer)?; + + // Serialize the time and log level first + map_serializer.serialize_entry("time", &dt)?; + map_serializer.serialize_entry("level", self.level.as_short_str())?; + + let kv = self.kv.lock(); + + // Convoluted pattern to avoid binding `format_args!` to a temporary. 
+ // See: https://stackoverflow.com/questions/56304313/cannot-use-format-args-due-to-temporary-value-is-freed-at-the-end-of-this-state + let mut f = |msg: std::fmt::Arguments| { + map_serializer.serialize_entry("msg", &msg.to_string())?; + + let record = Record::new(&rs, &msg, BorrowedKV(&(*kv))); + self.logger_values + .serialize(&record, &mut map_serializer) + .map_err(serde::ser::Error::custom)?; + record + .kv() + .serialize(&record, &mut map_serializer) + .map_err(serde::ser::Error::custom) + }; + f(format_args!("{}", self.msg))?; + map_serializer.end() + } +} + +struct SerdeSerializer { + /// Current state of map serializing: `serde::Serializer::MapState` + ser_map: S::SerializeMap, +} + +impl SerdeSerializer { + fn new(ser: S) -> Result { + let ser_map = ser.serialize_map(None)?; + Ok(SerdeSerializer { ser_map }) + } + + fn serialize_entry(&mut self, key: K, value: V) -> Result<(), S::Error> + where + K: serde::Serialize, + V: serde::Serialize, + { + self.ser_map.serialize_entry(&key, &value) + } + + /// Finish serialization, and return the serializer + fn end(self) -> Result { + self.ser_map.end() + } +} + +// NOTE: This is borrowed from slog_json +macro_rules! 
impl_m( + ($s:expr, $key:expr, $val:expr) => ({ + let k_s: &str = $key.as_ref(); + $s.ser_map.serialize_entry(k_s, $val) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("serde serialization error: {}", e)))?; + Ok(()) + }); +); + +impl slog::Serializer for SerdeSerializer +where + S: serde::Serializer, +{ + fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { + impl_m!(self, key, &val) + } + + fn emit_unit(&mut self, key: Key) -> slog::Result { + impl_m!(self, key, &()) + } + + fn emit_char(&mut self, key: Key, val: char) -> slog::Result { + impl_m!(self, key, &val) + } + + fn emit_none(&mut self, key: Key) -> slog::Result { + let val: Option<()> = None; + impl_m!(self, key, &val) + } + fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { + impl_m!(self, key, &val) + } + serde_if_integer128! 
{ + fn emit_u128(&mut self, key: Key, val: u128) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i128(&mut self, key: Key, val: i128) -> slog::Result { + impl_m!(self, key, &val) + } + } + fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result { + TL_BUF.with(|buf| { + let mut buf = buf.borrow_mut(); + + buf.write_fmt(*val).unwrap(); + + let res = { || impl_m!(self, key, &*buf) }(); + buf.clear(); + res + }) + } +} diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 85c4255744..a9ad25f3f3 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -11,6 +11,10 @@ use std::time::{Duration, Instant}; pub const MAX_MESSAGE_WIDTH: usize = 40; +pub mod async_record; +mod sse_logging_components; +pub use sse_logging_components::SSELoggingComponents; + /// The minimum interval between log messages indicating that a queue is full. const LOG_DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30); diff --git a/common/logging/src/sse_logging_components.rs b/common/logging/src/sse_logging_components.rs new file mode 100644 index 0000000000..244d09fbd1 --- /dev/null +++ b/common/logging/src/sse_logging_components.rs @@ -0,0 +1,46 @@ +//! This module provides an implementation of `slog::Drain` that optionally writes to a channel if +//! there are subscribers to a HTTP SSE stream. + +use crate::async_record::AsyncRecord; +use slog::{Drain, OwnedKVList, Record}; +use std::panic::AssertUnwindSafe; +use std::sync::Arc; +use tokio::sync::broadcast::Sender; + +/// Default log level for SSE Events. +// NOTE: Made this a constant. Debug level seems to be pretty intense. Can make this +// configurable later if needed. +const LOG_LEVEL: slog::Level = slog::Level::Info; + +/// The components required in the HTTP API task to receive logged events. 
+#[derive(Clone)] +pub struct SSELoggingComponents { + /// The channel to receive events from. + pub sender: Arc>>, +} + +impl SSELoggingComponents { + /// Create a new SSE drain. + pub fn new(channel_size: usize) -> Self { + let (sender, _receiver) = tokio::sync::broadcast::channel(channel_size); + + let sender = Arc::new(AssertUnwindSafe(sender)); + SSELoggingComponents { sender } + } +} + +impl Drain for SSELoggingComponents { + type Ok = (); + type Err = &'static str; + + fn log(&self, record: &Record, logger_values: &OwnedKVList) -> Result { + if record.level().is_at_least(LOG_LEVEL) { + // Attempt to send the logs + match self.sender.send(AsyncRecord::from(record, logger_values)) { + Ok(_num_sent) => {} // Everything got sent + Err(_err) => {} // There are no subscribers, do nothing + } + } + Ok(()) + } +} diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 7b8e9ba9a8..966741ca4d 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -1,4 +1,4 @@ -///! This implements a time-based LRU cache for fast checking of duplicates +//! 
This implements a time-based LRU cache for fast checking of duplicates use fnv::FnvHashSet; use std::collections::VecDeque; use std::time::{Duration, Instant}; diff --git a/common/pretty_reqwest_error/Cargo.toml b/common/pretty_reqwest_error/Cargo.toml new file mode 100644 index 0000000000..ca9f4812b0 --- /dev/null +++ b/common/pretty_reqwest_error/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "pretty_reqwest_error" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +reqwest = { version = "0.11.0", features = ["json","stream"] } +sensitive_url = { path = "../sensitive_url" } diff --git a/common/pretty_reqwest_error/src/lib.rs b/common/pretty_reqwest_error/src/lib.rs new file mode 100644 index 0000000000..4c605f38ae --- /dev/null +++ b/common/pretty_reqwest_error/src/lib.rs @@ -0,0 +1,62 @@ +use sensitive_url::SensitiveUrl; +use std::error::Error as StdError; +use std::fmt; + +pub struct PrettyReqwestError(reqwest::Error); + +impl PrettyReqwestError { + pub fn inner(&self) -> &reqwest::Error { + &self.0 + } +} + +impl fmt::Debug for PrettyReqwestError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if let Some(url) = self.0.url() { + if let Ok(url) = SensitiveUrl::new(url.clone()) { + write!(f, "url: {}", url)?; + } else { + write!(f, "url: unable_to_parse")?; + }; + } + + let kind = if self.0.is_builder() { + "builder" + } else if self.0.is_redirect() { + "redirect" + } else if self.0.is_status() { + "status" + } else if self.0.is_timeout() { + "timeout" + } else if self.0.is_request() { + "request" + } else if self.0.is_connect() { + "connect" + } else if self.0.is_body() { + "body" + } else if self.0.is_decode() { + "decode" + } else { + "unknown" + }; + write!(f, ", kind: {}", kind)?; + + if let Some(status) = self.0.status() { + write!(f, ", status_code: {}", status)?; + } + + if let Some(ref source) = self.0.source() { + write!(f, ", detail: 
{}", source)?; + } else { + write!(f, ", source: unknown")?; + } + + Ok(()) + } +} + +impl From for PrettyReqwestError { + fn from(inner: reqwest::Error) -> Self { + Self(inner) + } +} diff --git a/common/sensitive_url/src/lib.rs b/common/sensitive_url/src/lib.rs index b6705eb602..b6068a2dca 100644 --- a/common/sensitive_url/src/lib.rs +++ b/common/sensitive_url/src/lib.rs @@ -75,7 +75,7 @@ impl SensitiveUrl { SensitiveUrl::new(surl) } - fn new(full: Url) -> Result { + pub fn new(full: Url) -> Result { let mut redacted = full.clone(); redacted .path_segments_mut() diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index c2856003bf..0f43c8890f 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -ssz_types = "0.5.0" +ssz_types = "0.5.3" ethereum_hashing = "1.0.0-beta.2" ethereum_ssz_derive = "0.5.0" ethereum_ssz = "0.5.0" diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index e6c46e83e7..e60774fc86 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,10 +1,15 @@ use crate::{ForkChoiceStore, InvalidationOperation}; +use per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; -use slog::{crit, debug, warn, Logger}; +use slog::{crit, debug, error, warn, Logger}; use ssz_derive::{Decode, Encode}; +use state_processing::per_epoch_processing::altair::ParticipationCache; +use state_processing::per_epoch_processing::{ + weigh_justification_and_finalization, JustificationAndFinalizationState, +}; use state_processing::{ per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, }; @@ -18,6 +23,7 @@ use types::{ 
EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; +use types::{ProgressiveBalancesCache, ProgressiveBalancesMode}; #[derive(Debug)] pub enum Error { @@ -72,7 +78,9 @@ pub enum Error { }, UnrealizedVoteProcessing(state_processing::EpochProcessingError), ParticipationCacheBuild(BeaconStateError), + ParticipationCacheError(ParticipationCacheError), ValidatorStatuses(BeaconStateError), + ProgressiveBalancesCacheCheckFailed(String), } impl From for Error { @@ -93,6 +101,18 @@ impl From for Error { } } +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconStateError(e) + } +} + +impl From for Error { + fn from(e: ParticipationCacheError) -> Self { + Error::ParticipationCacheError(e) + } +} + #[derive(Debug, Clone, Copy)] /// Controls how fork choice should behave when restoring from a persisted fork choice. pub enum ResetPayloadStatuses { @@ -174,21 +194,6 @@ impl From for Error { } } -/// Indicates whether the unrealized justification of a block should be calculated and tracked. -/// If a block has been finalized, this can be set to false. This is useful when syncing finalized -/// portions of the chain. Otherwise this should always be set to true. -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum CountUnrealized { - True, - False, -} - -impl CountUnrealized { - pub fn is_true(&self) -> bool { - matches!(self, CountUnrealized::True) - } -} - /// Indicates if a block has been verified by an execution payload. /// /// There is no variant for "invalid", since such a block should never be added to fork choice. @@ -658,9 +663,17 @@ where block_delay: Duration, state: &BeaconState, payload_verification_status: PayloadVerificationStatus, + progressive_balances_mode: ProgressiveBalancesMode, spec: &ChainSpec, - count_unrealized: CountUnrealized, + log: &Logger, ) -> Result<(), Error> { + // If this block has already been processed we do not need to reprocess it. 
+ // We check this immediately in case re-processing the block mutates some property of the + // global fork choice store, e.g. the justified checkpoints or the proposer boost root. + if self.proto_array.contains_block(&block_root) { + return Ok(()); + } + // Provide the slot (as per the system clock) to the `fc_store` and then return its view of // the current slot. The `fc_store` will ensure that the `current_slot` is never // decreasing, a property which we must maintain. @@ -726,97 +739,126 @@ where )?; // Update unrealized justified/finalized checkpoints. - let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if count_unrealized - .is_true() + let block_epoch = block.slot().epoch(E::slots_per_epoch()); + + // If the parent checkpoints are already at the same epoch as the block being imported, + // it's impossible for the unrealized checkpoints to differ from the parent's. This + // holds true because: + // + // 1. A child block cannot have lower FFG checkpoints than its parent. + // 2. A block in epoch `N` cannot contain attestations which would justify an epoch higher than `N`. + // 3. A block in epoch `N` cannot contain attestations which would finalize an epoch higher than `N - 1`. + // + // This is an optimization. It should reduce the amount of times we run + // `process_justification_and_finalization` by approximately 1/3rd when the chain is + // performing optimally. 
+ let parent_checkpoints = parent_block + .unrealized_justified_checkpoint + .zip(parent_block.unrealized_finalized_checkpoint) + .filter(|(parent_justified, parent_finalized)| { + parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch + }); + + let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some(( + parent_justified, + parent_finalized, + )) = + parent_checkpoints { - let block_epoch = block.slot().epoch(E::slots_per_epoch()); + (parent_justified, parent_finalized) + } else { + let justification_and_finalization_state = match block { + BeaconBlockRef::Capella(_) + | BeaconBlockRef::Merge(_) + | BeaconBlockRef::Altair(_) => match progressive_balances_mode { + ProgressiveBalancesMode::Disabled => { + let participation_cache = ParticipationCache::new(state, spec) + .map_err(Error::ParticipationCacheBuild)?; + per_epoch_processing::altair::process_justification_and_finalization( + state, + &participation_cache, + )? + } + ProgressiveBalancesMode::Fast + | ProgressiveBalancesMode::Checked + | ProgressiveBalancesMode::Strict => { + let maybe_participation_cache = progressive_balances_mode + .perform_comparative_checks() + .then(|| { + ParticipationCache::new(state, spec) + .map_err(Error::ParticipationCacheBuild) + }) + .transpose()?; - // If the parent checkpoints are already at the same epoch as the block being imported, - // it's impossible for the unrealized checkpoints to differ from the parent's. This - // holds true because: - // - // 1. A child block cannot have lower FFG checkpoints than its parent. - // 2. A block in epoch `N` cannot contain attestations which would justify an epoch higher than `N`. - // 3. A block in epoch `N` cannot contain attestations which would finalize an epoch higher than `N - 1`. - // - // This is an optimization. It should reduce the amount of times we run - // `process_justification_and_finalization` by approximately 1/3rd when the chain is - // performing optimally. 
- let parent_checkpoints = parent_block - .unrealized_justified_checkpoint - .zip(parent_block.unrealized_finalized_checkpoint) - .filter(|(parent_justified, parent_finalized)| { - parent_justified.epoch == block_epoch - && parent_finalized.epoch + 1 >= block_epoch - }); - - let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = - if let Some((parent_justified, parent_finalized)) = parent_checkpoints { - (parent_justified, parent_finalized) - } else { - let justification_and_finalization_state = match block { - BeaconBlockRef::Capella(_) - | BeaconBlockRef::Merge(_) - | BeaconBlockRef::Altair(_) => { - let participation_cache = - per_epoch_processing::altair::ParticipationCache::new(state, spec) - .map_err(Error::ParticipationCacheBuild)?; - per_epoch_processing::altair::process_justification_and_finalization( + process_justification_and_finalization_from_progressive_cache::( state, - &participation_cache, - )? - } - BeaconBlockRef::Base(_) => { - let mut validator_statuses = - per_epoch_processing::base::ValidatorStatuses::new(state, spec) - .map_err(Error::ValidatorStatuses)?; - validator_statuses - .process_attestations(state) - .map_err(Error::ValidatorStatuses)?; - per_epoch_processing::base::process_justification_and_finalization( - state, - &validator_statuses.total_balances, - spec, - )? 
- } - }; - - ( - justification_and_finalization_state.current_justified_checkpoint(), - justification_and_finalization_state.finalized_checkpoint(), - ) - }; - - // Update best known unrealized justified & finalized checkpoints - if unrealized_justified_checkpoint.epoch - > self.fc_store.unrealized_justified_checkpoint().epoch - { - self.fc_store - .set_unrealized_justified_checkpoint(unrealized_justified_checkpoint); - } - if unrealized_finalized_checkpoint.epoch - > self.fc_store.unrealized_finalized_checkpoint().epoch - { - self.fc_store - .set_unrealized_finalized_checkpoint(unrealized_finalized_checkpoint); - } - - // If block is from past epochs, try to update store's justified & finalized checkpoints right away - if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { - self.pull_up_store_checkpoints( - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - )?; - } + maybe_participation_cache.as_ref(), + ) + .or_else(|e| { + if progressive_balances_mode != ProgressiveBalancesMode::Strict { + error!( + log, + "Processing with progressive balances cache failed"; + "info" => "falling back to the non-optimized processing method", + "error" => ?e, + ); + let participation_cache = maybe_participation_cache + .map(Ok) + .unwrap_or_else(|| ParticipationCache::new(state, spec)) + .map_err(Error::ParticipationCacheBuild)?; + per_epoch_processing::altair::process_justification_and_finalization( + state, + &participation_cache, + ).map_err(Error::from) + } else { + Err(e) + } + })? + } + }, + BeaconBlockRef::Base(_) => { + let mut validator_statuses = + per_epoch_processing::base::ValidatorStatuses::new(state, spec) + .map_err(Error::ValidatorStatuses)?; + validator_statuses + .process_attestations(state) + .map_err(Error::ValidatorStatuses)?; + per_epoch_processing::base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )? 
+ } + }; ( - Some(unrealized_justified_checkpoint), - Some(unrealized_finalized_checkpoint), + justification_and_finalization_state.current_justified_checkpoint(), + justification_and_finalization_state.finalized_checkpoint(), ) - } else { - (None, None) }; + // Update best known unrealized justified & finalized checkpoints + if unrealized_justified_checkpoint.epoch + > self.fc_store.unrealized_justified_checkpoint().epoch + { + self.fc_store + .set_unrealized_justified_checkpoint(unrealized_justified_checkpoint); + } + if unrealized_finalized_checkpoint.epoch + > self.fc_store.unrealized_finalized_checkpoint().epoch + { + self.fc_store + .set_unrealized_finalized_checkpoint(unrealized_finalized_checkpoint); + } + + // If block is from past epochs, try to update store's justified & finalized checkpoints right away + if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { + self.pull_up_store_checkpoints( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + )?; + } + let target_slot = block .slot() .epoch(E::slots_per_epoch()) @@ -886,8 +928,8 @@ where justified_checkpoint: state.current_justified_checkpoint(), finalized_checkpoint: state.finalized_checkpoint(), execution_status, - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, + unrealized_justified_checkpoint: Some(unrealized_justified_checkpoint), + unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint), }, current_slot, )?; @@ -1520,6 +1562,92 @@ where } } +/// Process justification and finalization using progressive cache. Also performs a comparative +/// check against the `ParticipationCache` if it is supplied. +/// +/// Returns an error if the cache is not initialized or if there is a mismatch on the comparative check. 
+fn process_justification_and_finalization_from_progressive_cache( + state: &BeaconState, + maybe_participation_cache: Option<&ParticipationCache>, +) -> Result, Error> +where + E: EthSpec, + T: ForkChoiceStore, +{ + let justification_and_finalization_state = JustificationAndFinalizationState::new(state); + if state.current_epoch() <= E::genesis_epoch() + 1 { + return Ok(justification_and_finalization_state); + } + + // Load cached balances + let progressive_balances_cache: &ProgressiveBalancesCache = state.progressive_balances_cache(); + let previous_target_balance = + progressive_balances_cache.previous_epoch_target_attesting_balance()?; + let current_target_balance = + progressive_balances_cache.current_epoch_target_attesting_balance()?; + let total_active_balance = state.get_total_active_balance()?; + + if let Some(participation_cache) = maybe_participation_cache { + check_progressive_balances::( + state, + participation_cache, + previous_target_balance, + current_target_balance, + total_active_balance, + )?; + } + + weigh_justification_and_finalization( + justification_and_finalization_state, + total_active_balance, + previous_target_balance, + current_target_balance, + ) + .map_err(Error::from) +} + +/// Perform comparative checks against `ParticipationCache`, will return error if there's a mismatch. 
+fn check_progressive_balances( + state: &BeaconState, + participation_cache: &ParticipationCache, + cached_previous_target_balance: u64, + cached_current_target_balance: u64, + cached_total_active_balance: u64, +) -> Result<(), Error> +where + E: EthSpec, + T: ForkChoiceStore, +{ + let slot = state.slot(); + let epoch = state.current_epoch(); + + // Check previous epoch target balances + let previous_target_balance = participation_cache.previous_epoch_target_attesting_balance()?; + if previous_target_balance != cached_previous_target_balance { + return Err(Error::ProgressiveBalancesCacheCheckFailed( + format!("Previous epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, previous_target_balance, cached_previous_target_balance) + )); + } + + // Check current epoch target balances + let current_target_balance = participation_cache.current_epoch_target_attesting_balance()?; + if current_target_balance != cached_current_target_balance { + return Err(Error::ProgressiveBalancesCacheCheckFailed( + format!("Current epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, current_target_balance, cached_current_target_balance) + )); + } + + // Check current epoch total balances + let total_active_balance = participation_cache.current_epoch_total_active_balance(); + if total_active_balance != cached_total_active_balance { + return Err(Error::ProgressiveBalancesCacheCheckFailed( + format!("Current epoch total active balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, total_active_balance, cached_total_active_balance) + )); + } + + Ok(()) +} + /// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes. /// /// This is used when persisting the state of the fork choice to disk. 
diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 397a2ff893..e7ca84efb3 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -2,9 +2,9 @@ mod fork_choice; mod fork_choice_store; pub use crate::fork_choice::{ - AttestationFromBlock, CountUnrealized, Error, ForkChoice, ForkChoiceView, - ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, - PersistedForkChoice, QueuedAttestation, ResetPayloadStatuses, + AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, + InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, + QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 82bf642f18..d28210aa1b 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -12,18 +12,18 @@ use beacon_chain::{ StateSkipConfig, WhenSlotSkipped, }; use fork_choice::{ - CountUnrealized, ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, - QueuedAttestation, + ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, }; use store::MemoryStore; use types::{ test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, - Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, SignedBeaconBlock, Slot, SubnetId, + Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, ProgressiveBalancesMode, + RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, }; pub type E = MainnetEthSpec; -pub const VALIDATOR_COUNT: usize = 32; +pub const VALIDATOR_COUNT: usize = 64; /// Defines some delay between when an attestation is created and when it is mutated. 
pub enum MutationDelay { @@ -69,6 +69,24 @@ impl ForkChoiceTest { Self { harness } } + /// Creates a new tester with the specified `ProgressiveBalancesMode` and genesis from latest fork. + fn new_with_progressive_balances_mode(mode: ProgressiveBalancesMode) -> ForkChoiceTest { + // genesis with latest fork (at least altair required to test the cache) + let spec = ForkName::latest().make_genesis_spec(ChainSpec::default()); + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec) + .chain_config(ChainConfig { + progressive_balances_mode: mode, + ..ChainConfig::default() + }) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + Self { harness } + } + /// Get a value from the `ForkChoice` instantiation. fn get(&self, func: T) -> U where @@ -213,6 +231,39 @@ impl ForkChoiceTest { self } + /// Slash a validator from the previous epoch committee. + pub async fn add_previous_epoch_attester_slashing(self) -> Self { + let state = self.harness.get_current_state(); + let previous_epoch_shuffling = state.get_shuffling(RelativeEpoch::Previous).unwrap(); + let validator_indices = previous_epoch_shuffling + .iter() + .map(|idx| *idx as u64) + .take(1) + .collect(); + + self.harness + .add_attester_slashing(validator_indices) + .unwrap(); + + self + } + + /// Slash the proposer of a block in the previous epoch. + pub async fn add_previous_epoch_proposer_slashing(self, slots_per_epoch: u64) -> Self { + let previous_epoch_slot = self.harness.get_current_slot() - slots_per_epoch; + let previous_epoch_block = self + .harness + .chain + .block_at_slot(previous_epoch_slot, WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let proposer_index: u64 = previous_epoch_block.message().proposer_index(); + + self.harness.add_proposer_slashing(proposer_index).unwrap(); + + self + } + /// Apply `count` blocks to the chain (without attestations). 
pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { self.harness.advance_slot(); @@ -287,8 +338,9 @@ impl ForkChoiceTest { Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, + self.harness.chain.config.progressive_balances_mode, &self.harness.chain.spec, - CountUnrealized::True, + self.harness.logger(), ) .unwrap(); self @@ -330,8 +382,9 @@ impl ForkChoiceTest { Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, + self.harness.chain.config.progressive_balances_mode, &self.harness.chain.spec, - CountUnrealized::True, + self.harness.logger(), ) .err() .expect("on_block did not return an error"); @@ -1290,3 +1343,49 @@ async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } + +/// Checks that `ProgressiveBalancesCache` is updated correctly after an attester slashing event, +/// where the slashed validator is a target attester in previous / current epoch. +#[tokio::test] +async fn progressive_balances_cache_attester_slashing() { + ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict) + // first two epochs + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await + .unwrap() + .add_previous_epoch_attester_slashing() + .await + // expect fork choice to import blocks successfully after a previous epoch attester is + // slashed, i.e. the slashed attester's balance is correctly excluded from + // the previous epoch total balance in `ProgressiveBalancesCache`. + .apply_blocks(1) + .await + // expect fork choice to import another epoch of blocks successfully - the slashed + // attester's balance should be excluded from the current epoch total balance in + // `ProgressiveBalancesCache` as well. 
+ .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .await; +} + +/// Checks that `ProgressiveBalancesCache` is updated correctly after a proposer slashing event, +/// where the slashed validator is a target attester in previous / current epoch. +#[tokio::test] +async fn progressive_balances_cache_proposer_slashing() { + ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict) + // first two epochs + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await + .unwrap() + .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) + .await + // expect fork choice to import blocks successfully after a previous epoch proposer is + // slashed, i.e. the slashed proposer's balance is correctly excluded from + // the previous epoch total balance in `ProgressiveBalancesCache`. + .apply_blocks(1) + .await + // expect fork choice to import another epoch of blocks successfully - the slashed + // proposer's balance should be excluded from the current epoch total balance in + // `ProgressiveBalancesCache` as well. 
+ .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .await; +} diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index c16742782c..f19cd1d29d 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -15,7 +15,7 @@ integer-sqrt = "0.1.5" itertools = "0.10.0" ethereum_ssz = "0.5.0" ethereum_ssz_derive = "0.5.0" -ssz_types = "0.5.0" +ssz_types = "0.5.3" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } tree_hash = "0.5.0" diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 8a2e2439bb..ffe8be3a04 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -7,6 +7,7 @@ mod slash_validator; pub mod altair; pub mod base; +pub mod update_progressive_balances_cache; pub use deposit_data_tree::DepositDataTree; pub use get_attestation_participation::get_attestation_participation_flag_indices; diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index d4675f5ef5..d54da43a04 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -1,3 +1,4 @@ +use crate::common::update_progressive_balances_cache::update_progressive_balances_on_slashing; use crate::{ common::{decrease_balance, increase_balance, initiate_validator_exit}, per_block_processing::errors::BlockProcessingError, @@ -43,6 +44,8 @@ pub fn slash_validator( .safe_div(spec.min_slashing_penalty_quotient_for_state(state))?, )?; + update_progressive_balances_on_slashing(state, slashed_index)?; + // Apply proposer and whistleblower rewards let proposer_index = ctxt.get_proposer_index(state, spec)? 
as usize; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs new file mode 100644 index 0000000000..45b5d657a6 --- /dev/null +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -0,0 +1,142 @@ +/// A collection of all functions that mutates the `ProgressiveBalancesCache`. +use crate::metrics::{ + PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, +}; +use crate::per_epoch_processing::altair::ParticipationCache; +use crate::{BlockProcessingError, EpochProcessingError}; +use lighthouse_metrics::set_gauge; +use ssz_types::VariableList; +use std::borrow::Cow; +use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; +use types::{ + is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, + ParticipationFlags, ProgressiveBalancesCache, +}; + +/// Initializes the `ProgressiveBalancesCache` cache using balance values from the +/// `ParticipationCache`. If the optional `&ParticipationCache` is not supplied, it will be computed +/// from the `BeaconState`. 
+pub fn initialize_progressive_balances_cache( + state: &mut BeaconState, + maybe_participation_cache: Option<&ParticipationCache>, + spec: &ChainSpec, +) -> Result<(), BeaconStateError> { + if !is_progressive_balances_enabled(state) + || state.progressive_balances_cache().is_initialized() + { + return Ok(()); + } + + let participation_cache = match maybe_participation_cache { + Some(cache) => Cow::Borrowed(cache), + None => Cow::Owned(ParticipationCache::new(state, spec)?), + }; + + let previous_epoch_target_attesting_balance = participation_cache + .previous_epoch_target_attesting_balance_raw() + .map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?; + + let current_epoch_target_attesting_balance = participation_cache + .current_epoch_target_attesting_balance_raw() + .map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?; + + let current_epoch = state.current_epoch(); + state.progressive_balances_cache_mut().initialize( + current_epoch, + previous_epoch_target_attesting_balance, + current_epoch_target_attesting_balance, + ); + + update_progressive_balances_metrics(state.progressive_balances_cache())?; + + Ok(()) +} + +/// Updates the `ProgressiveBalancesCache` when a new target attestation has been processed. +pub fn update_progressive_balances_on_attestation( + state: &mut BeaconState, + epoch: Epoch, + validator_index: usize, +) -> Result<(), BlockProcessingError> { + if is_progressive_balances_enabled(state) { + let validator = state.get_validator(validator_index)?; + if !validator.slashed { + let validator_effective_balance = validator.effective_balance; + state + .progressive_balances_cache_mut() + .on_new_target_attestation(epoch, validator_effective_balance)?; + } + } + Ok(()) +} + +/// Updates the `ProgressiveBalancesCache` when a target attester has been slashed. 
+pub fn update_progressive_balances_on_slashing( + state: &mut BeaconState, + validator_index: usize, +) -> Result<(), BlockProcessingError> { + if is_progressive_balances_enabled(state) { + let previous_epoch_participation = state.previous_epoch_participation()?; + let is_previous_epoch_target_attester = + is_target_attester_in_epoch::(previous_epoch_participation, validator_index)?; + + let current_epoch_participation = state.current_epoch_participation()?; + let is_current_epoch_target_attester = + is_target_attester_in_epoch::(current_epoch_participation, validator_index)?; + + let validator_effective_balance = state.get_effective_balance(validator_index)?; + + state.progressive_balances_cache_mut().on_slashing( + is_previous_epoch_target_attester, + is_current_epoch_target_attester, + validator_effective_balance, + )?; + } + + Ok(()) +} + +/// Updates the `ProgressiveBalancesCache` on epoch transition. +pub fn update_progressive_balances_on_epoch_transition( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), EpochProcessingError> { + if is_progressive_balances_enabled(state) { + state + .progressive_balances_cache_mut() + .on_epoch_transition(spec)?; + + update_progressive_balances_metrics(state.progressive_balances_cache())?; + } + + Ok(()) +} + +pub fn update_progressive_balances_metrics( + cache: &ProgressiveBalancesCache, +) -> Result<(), BeaconStateError> { + set_gauge( + &PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + cache.previous_epoch_target_attesting_balance()? as i64, + ); + + set_gauge( + &PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + cache.current_epoch_target_attesting_balance()? 
as i64, + ); + + Ok(()) +} + +fn is_target_attester_in_epoch( + epoch_participation: &VariableList, + validator_index: usize, +) -> Result { + let participation_flags = epoch_participation + .get(validator_index) + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; + participation_flags + .has_flag(TIMELY_TARGET_FLAG_INDEX) + .map_err(|e| e.into()) +} diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 68f04b554e..ebbc8f9f31 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -92,7 +92,7 @@ pub fn initialize_beacon_state_from_eth1( } // Now that we have our validators, initialize the caches (including the committees) - state.build_all_caches(spec)?; + state.build_caches(spec)?; // Set genesis validators root for domain separation and chain versioning *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache()?; @@ -115,7 +115,7 @@ pub fn process_activations( state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { - let (validators, balances) = state.validators_and_balances_mut(); + let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut(); for (index, validator) in validators.iter_mut().enumerate() { let balance = balances .get(index) diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index ddfaae5640..360b007678 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -23,4 +23,15 @@ lazy_static! 
{ "beacon_participation_prev_epoch_active_gwei_total", "Total effective balance (gwei) of validators active in the previous epoch" ); + /* + * Participation Metrics (progressive balances) + */ + pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result = try_create_int_gauge( + "beacon_participation_prev_epoch_target_attesting_gwei_progressive_total", + "Progressive total effective balance (gwei) of validators who attested to the target in the previous epoch" + ); + pub static ref PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result = try_create_int_gauge( + "beacon_participation_curr_epoch_target_attesting_gwei_progressive_total", + "Progressive total effective balance (gwei) of validators who attested to the target in the current epoch" + ); } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 124fdf6500..b8b76a499d 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -41,6 +41,9 @@ mod verify_proposer_slashing; use crate::common::decrease_balance; use crate::StateProcessingStrategy; +use crate::common::update_progressive_balances_cache::{ + initialize_progressive_balances_cache, update_progressive_balances_metrics, +}; #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -114,6 +117,8 @@ pub fn per_block_processing>( .fork_name(spec) .map_err(BlockProcessingError::InconsistentStateFork)?; + initialize_progressive_balances_cache(state, None, spec)?; + let verify_signatures = match block_signature_strategy { BlockSignatureStrategy::VerifyBulk => { // Verify all signatures in the block at once. 
@@ -182,6 +187,10 @@ pub fn per_block_processing>( )?; } + if is_progressive_balances_enabled(state) { + update_progressive_balances_metrics(state.progressive_balances_cache())?; + } + Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 1aaf298d69..0aba1d83fa 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -1,6 +1,8 @@ use super::signature_sets::Error as SignatureSetError; +use crate::per_epoch_processing::altair::participation_cache; use crate::ContextError; use merkle_proof::MerkleTreeError; +use participation_cache::Error as ParticipationCacheError; use safe_arith::ArithError; use ssz::DecodeError; use types::*; @@ -83,6 +85,7 @@ pub enum BlockProcessingError { found: Hash256, }, WithdrawalCredentialsInvalid, + ParticipationCacheError(ParticipationCacheError), } impl From for BlockProcessingError { @@ -140,6 +143,12 @@ impl From> for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(e: ParticipationCacheError) -> Self { + BlockProcessingError::ParticipationCacheError(e) + } +} + /// A conversion that consumes `self` and adds an `index` variable to resulting struct. 
/// /// Used here to allow converting an error into an upstream error that points to the object that diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 4bee596615..1dbcb7fb8f 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -97,6 +97,8 @@ pub mod base { pub mod altair { use super::*; + use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation; + use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; pub fn process_attestations( state: &mut BeaconState, @@ -163,6 +165,14 @@ pub mod altair { get_base_reward(state, index, base_reward_per_increment, spec)? .safe_mul(weight)?, )?; + + if flag_index == TIMELY_TARGET_FLAG_INDEX { + update_progressive_balances_on_attestation( + state, + data.target.epoch, + index, + )?; + } } } } @@ -235,6 +245,7 @@ pub fn process_attester_slashings( Ok(()) } + /// Wrapper function to handle calling the correct version of `process_attestations` based on /// the fork. 
pub fn process_attestations>( diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index d5df2fc975..0abbd16a98 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -1,4 +1,7 @@ use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; +use crate::common::update_progressive_balances_cache::{ + initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition, +}; use crate::per_epoch_processing::{ effective_balance_updates::process_effective_balance_updates, historical_roots_update::process_historical_roots_update, @@ -31,6 +34,7 @@ pub fn process_epoch( // Pre-compute participating indices and total balances. let participation_cache = ParticipationCache::new(state, spec)?; let sync_committee = state.current_sync_committee()?.clone(); + initialize_progressive_balances_cache::(state, Some(&participation_cache), spec)?; // Justification and finalization. let justification_and_finalization_state = @@ -56,7 +60,7 @@ pub fn process_epoch( process_eth1_data_reset(state)?; // Update effective balances with hysteresis (lag). - process_effective_balance_updates(state, spec)?; + process_effective_balance_updates(state, Some(&participation_cache), spec)?; // Reset slashings process_slashings_reset(state)?; @@ -75,6 +79,8 @@ pub fn process_epoch( // Rotate the epoch caches to suit the epoch transition. 
state.advance_caches(spec)?; + update_progressive_balances_on_epoch_transition(state, spec)?; + Ok(EpochProcessingSummary::Altair { participation_cache, sync_committee, diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs index 004726923e..a5caddd045 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs @@ -11,49 +11,23 @@ //! Additionally, this cache is returned from the `altair::process_epoch` function and can be used //! to get useful summaries about the validator participation in an epoch. -use safe_arith::{ArithError, SafeArith}; use types::{ consts::altair::{ NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, }, - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, RelativeEpoch, + Balance, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, + RelativeEpoch, }; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub enum Error { InvalidFlagIndex(usize), InvalidValidatorIndex(usize), } -/// A balance which will never be below the specified `minimum`. -/// -/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected. -#[derive(PartialEq, Debug, Clone, Copy)] -struct Balance { - raw: u64, - minimum: u64, -} - -impl Balance { - /// Initialize the balance to `0`, or the given `minimum`. - pub fn zero(minimum: u64) -> Self { - Self { raw: 0, minimum } - } - - /// Returns the balance with respect to the initialization `minimum`. - pub fn get(&self) -> u64 { - std::cmp::max(self.raw, self.minimum) - } - - /// Add-assign to the balance. 
- pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> { - self.raw.safe_add_assign(other) - } -} - /// Caches the participation values for one epoch (either the previous or current). -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Debug, Clone)] struct SingleEpochParticipationCache { /// Maps an active validator index to their participation flags. /// @@ -95,6 +69,14 @@ impl SingleEpochParticipationCache { .ok_or(Error::InvalidFlagIndex(flag_index)) } + /// Returns the raw total balance of attesters who have `flag_index` set. + fn total_flag_balance_raw(&self, flag_index: usize) -> Result { + self.total_flag_balances + .get(flag_index) + .copied() + .ok_or(Error::InvalidFlagIndex(flag_index)) + } + /// Returns `true` if `val_index` is active, unslashed and has `flag_index` set. /// /// ## Errors @@ -173,7 +155,7 @@ impl SingleEpochParticipationCache { } /// Maintains a cache to be used during `altair::process_epoch`. -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Debug, Clone)] pub struct ParticipationCache { current_epoch: Epoch, /// Caches information about active validators pertaining to `self.current_epoch`. 
@@ -291,6 +273,11 @@ impl ParticipationCache { .total_flag_balance(TIMELY_TARGET_FLAG_INDEX) } + pub fn current_epoch_target_attesting_balance_raw(&self) -> Result { + self.current_epoch_participation + .total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX) + } + pub fn previous_epoch_total_active_balance(&self) -> u64 { self.previous_epoch_participation.total_active_balance.get() } @@ -300,6 +287,11 @@ impl ParticipationCache { .total_flag_balance(TIMELY_TARGET_FLAG_INDEX) } + pub fn previous_epoch_target_attesting_balance_raw(&self) -> Result { + self.previous_epoch_participation + .total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX) + } + pub fn previous_epoch_source_attesting_balance(&self) -> Result { self.previous_epoch_participation .total_flag_balance(TIMELY_SOURCE_FLAG_INDEX) diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index cb7e7d4b30..680563ce74 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -52,7 +52,7 @@ pub fn process_epoch( process_eth1_data_reset(state)?; // Update effective balances with hysteresis (lag). 
- process_effective_balance_updates(state, spec)?; + process_effective_balance_updates(state, None, spec)?; // Reset slashings process_slashings_reset(state)?; diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs index aaf301f29e..911510ed0c 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -11,6 +11,9 @@ use crate::per_epoch_processing::{ }; use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; +use crate::common::update_progressive_balances_cache::{ + initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition, +}; pub use historical_summaries_update::process_historical_summaries_update; mod historical_summaries_update; @@ -27,6 +30,7 @@ pub fn process_epoch( // Pre-compute participating indices and total balances. let participation_cache = ParticipationCache::new(state, spec)?; let sync_committee = state.current_sync_committee()?.clone(); + initialize_progressive_balances_cache(state, Some(&participation_cache), spec)?; // Justification and finalization. let justification_and_finalization_state = @@ -52,7 +56,7 @@ pub fn process_epoch( process_eth1_data_reset(state)?; // Update effective balances with hysteresis (lag). - process_effective_balance_updates(state, spec)?; + process_effective_balance_updates(state, Some(&participation_cache), spec)?; // Reset slashings process_slashings_reset(state)?; @@ -71,6 +75,8 @@ pub fn process_epoch( // Rotate the epoch caches to suit the epoch transition. 
state.advance_caches(spec)?; + update_progressive_balances_on_epoch_transition(state, spec)?; + Ok(EpochProcessingSummary::Altair { participation_cache, sync_committee, diff --git a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs index c166667b5a..1759f7e140 100644 --- a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs @@ -1,11 +1,13 @@ use super::errors::EpochProcessingError; +use crate::per_epoch_processing::altair::ParticipationCache; use safe_arith::SafeArith; use types::beacon_state::BeaconState; use types::chain_spec::ChainSpec; -use types::{BeaconStateError, EthSpec}; +use types::{BeaconStateError, EthSpec, ProgressiveBalancesCache}; pub fn process_effective_balance_updates( state: &mut BeaconState, + maybe_participation_cache: Option<&ParticipationCache>, spec: &ChainSpec, ) -> Result<(), EpochProcessingError> { let hysteresis_increment = spec @@ -13,7 +15,8 @@ pub fn process_effective_balance_updates( .safe_div(spec.hysteresis_quotient)?; let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; - let (validators, balances) = state.validators_and_balances_mut(); + let (validators, balances, progressive_balances_cache) = + state.validators_and_balances_and_progressive_balances_mut(); for (index, validator) in validators.iter_mut().enumerate() { let balance = balances .get(index) @@ -23,11 +26,43 @@ pub fn process_effective_balance_updates( if balance.safe_add(downward_threshold)? < validator.effective_balance || validator.effective_balance.safe_add(upward_threshold)? 
< balance { - validator.effective_balance = std::cmp::min( + let old_effective_balance = validator.effective_balance; + let new_effective_balance = std::cmp::min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ); + + if let Some(participation_cache) = maybe_participation_cache { + update_progressive_balances( + participation_cache, + progressive_balances_cache, + index, + old_effective_balance, + new_effective_balance, + )?; + } + + validator.effective_balance = new_effective_balance; } } Ok(()) } + +fn update_progressive_balances( + participation_cache: &ParticipationCache, + progressive_balances_cache: &mut ProgressiveBalancesCache, + index: usize, + old_effective_balance: u64, + new_effective_balance: u64, +) -> Result<(), EpochProcessingError> { + if old_effective_balance != new_effective_balance { + let is_current_epoch_target_attester = + participation_cache.is_current_epoch_timely_target_attester(index)?; + progressive_balances_cache.on_effective_balance_change( + is_current_epoch_target_attester, + old_effective_balance, + new_effective_balance, + )?; + } + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs index 6d5342cd36..2d595491c1 100644 --- a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -16,7 +16,7 @@ pub fn process_slashings( total_balance, ); - let (validators, balances) = state.validators_and_balances_mut(); + let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut(); for (index, validator) in validators.iter().enumerate() { if validator.slashed && epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)? 
diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 176f1af15c..26b1192bc1 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -1,3 +1,4 @@ +use crate::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices}; use std::mem; use std::sync::Arc; @@ -101,6 +102,7 @@ pub fn upgrade_to_altair( next_sync_committee: temp_sync_committee, // not read // Caches total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), exit_cache: mem::take(&mut pre.exit_cache), @@ -110,6 +112,8 @@ pub fn upgrade_to_altair( // Fill in previous epoch participation from the pre state's pending attestations. 
translate_participation(&mut post, &pre.previous_epoch_attestations, spec)?; + initialize_progressive_balances_cache(&mut post, None, spec)?; + // Fill in sync committees // Note: A duplicate committee is assigned for the current and next committee at the fork // boundary diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index 3b933fac37..5153e35f44 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -62,6 +62,7 @@ pub fn upgrade_to_capella( historical_summaries: VariableList::default(), // Caches total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), exit_cache: mem::take(&mut pre.exit_cache), diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index c172466248..eb74450107 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -60,6 +60,7 @@ pub fn upgrade_to_bellatrix( latest_execution_payload_header: >::default(), // Caches total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), exit_cache: mem::take(&mut pre.exit_cache), diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 91ad3089f1..ba15f6d488 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -27,7 +27,7 @@ serde_derive = "1.0.116" slog = "2.5.2" ethereum_ssz = { version = "0.5.0", features = ["arbitrary"] } ethereum_ssz_derive = "0.5.0" -ssz_types = { version = "0.5.0", features = ["arbitrary"] } +ssz_types = { version = "0.5.3", features = ["arbitrary"] } 
swap_or_not_shuffle = { path = "../swap_or_not_shuffle", features = ["arbitrary"] } test_random_derive = { path = "../../common/test_random_derive" } tree_hash = { version = "0.5.0", features = ["arbitrary"] } @@ -52,6 +52,7 @@ serde_json = "1.0.74" smallvec = "1.8.0" serde_with = "1.13.0" maplit = "1.0.2" +strum = { version = "0.24.0", features = ["derive"] } [dev-dependencies] criterion = "0.3.3" diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 28f57e7080..bb2b527109 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -51,7 +51,7 @@ fn all_benches(c: &mut Criterion) { let spec = Arc::new(MainnetEthSpec::default_spec()); let mut state = get_state::(validator_count); - state.build_all_caches(&spec).expect("should build caches"); + state.build_caches(&spec).expect("should build caches"); let state_bytes = state.as_ssz_bytes(); let inner_state = state.clone(); diff --git a/consensus/types/presets/gnosis/capella.yaml b/consensus/types/presets/gnosis/capella.yaml index 913c2956ba..fb36f94634 100644 --- a/consensus/types/presets/gnosis/capella.yaml +++ b/consensus/types/presets/gnosis/capella.yaml @@ -9,9 +9,9 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16 # Execution # --------------------------------------------------------------- # 2**4 (= 16) withdrawals -MAX_WITHDRAWALS_PER_PAYLOAD: 16 +MAX_WITHDRAWALS_PER_PAYLOAD: 8 # Withdrawals processing # --------------------------------------------------------------- # 2**14 (= 16384) validators -MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 8192 \ No newline at end of file diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index c0ba869410..dce1be742f 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -89,7 +89,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, } } -impl<'a, T: EthSpec> 
BeaconBlockBodyRef<'a, T> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Payload> { /// Get the fork_name of this object pub fn fork_name(self) -> ForkName { match self { diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 4a9da36404..1fa4dee3a0 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -26,6 +26,8 @@ pub use self::committee_cache::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, CommitteeCache, }; +pub use crate::beacon_state::balance::Balance; +pub use crate::beacon_state::progressive_balances_cache::*; use crate::historical_summary::HistoricalSummary; pub use clone_config::CloneConfig; pub use eth_spec::*; @@ -34,9 +36,11 @@ pub use tree_hash_cache::BeaconTreeHashCache; #[macro_use] mod committee_cache; +mod balance; mod clone_config; mod exit_cache; mod iter; +mod progressive_balances_cache; mod pubkey_cache; mod tests; mod tree_hash_cache; @@ -101,6 +105,9 @@ pub enum Error { SszTypesError(ssz_types::Error), TreeHashCacheNotInitialized, NonLinearTreeHashCacheHistory, + ParticipationCacheError(String), + ProgressiveBalancesCacheNotInitialized, + ProgressiveBalancesCacheInconsistent, TreeHashCacheSkippedSlot { cache: Slot, state: Slot, @@ -317,6 +324,12 @@ where #[tree_hash(skip_hashing)] #[test_random(default)] #[derivative(Clone(clone_with = "clone_default"))] + pub progressive_balances_cache: ProgressiveBalancesCache, + #[serde(skip_serializing, skip_deserializing)] + #[ssz(skip_serializing, skip_deserializing)] + #[tree_hash(skip_hashing)] + #[test_random(default)] + #[derivative(Clone(clone_with = "clone_default"))] pub committee_caches: [CommitteeCache; CACHED_EPOCHS], #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] @@ -393,6 +406,7 @@ impl BeaconState { // Caching (not in spec) total_active_balance: None, + progressive_balances_cache: 
<_>::default(), committee_caches: [ CommitteeCache::default(), CommitteeCache::default(), @@ -757,7 +771,7 @@ impl BeaconState { Ok(signature_hash_int.safe_rem(modulo)? == 0) } - /// Returns the beacon proposer index for the `slot` in the given `relative_epoch`. + /// Returns the beacon proposer index for the `slot` in `self.current_epoch()`. /// /// Spec v0.12.1 pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { @@ -1150,12 +1164,30 @@ impl BeaconState { } /// Convenience accessor for validators and balances simultaneously. - pub fn validators_and_balances_mut(&mut self) -> (&mut [Validator], &mut [u64]) { + pub fn validators_and_balances_and_progressive_balances_mut( + &mut self, + ) -> (&mut [Validator], &mut [u64], &mut ProgressiveBalancesCache) { match self { - BeaconState::Base(state) => (&mut state.validators, &mut state.balances), - BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), - BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), - BeaconState::Capella(state) => (&mut state.validators, &mut state.balances), + BeaconState::Base(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), + BeaconState::Altair(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), + BeaconState::Merge(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), + BeaconState::Capella(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), } } @@ -1380,7 +1412,7 @@ impl BeaconState { } /// Build all caches (except the tree hash cache), if they need to be built. 
- pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { self.build_all_committee_caches(spec)?; self.update_pubkey_cache()?; self.build_exit_cache(spec)?; @@ -1412,6 +1444,7 @@ impl BeaconState { self.drop_committee_cache(RelativeEpoch::Next)?; self.drop_pubkey_cache(); self.drop_tree_hash_cache(); + self.drop_progressive_balances_cache(); *self.exit_cache_mut() = ExitCache::default(); Ok(()) } @@ -1608,6 +1641,11 @@ impl BeaconState { *self.pubkey_cache_mut() = PubkeyCache::default() } + /// Completely drops the `progressive_balances_cache` cache, replacing it with a new, empty cache. + fn drop_progressive_balances_cache(&mut self) { + *self.progressive_balances_cache_mut() = ProgressiveBalancesCache::default(); + } + /// Initialize but don't fill the tree hash cache, if it isn't already initialized. pub fn initialize_tree_hash_cache(&mut self) { if !self.tree_hash_cache().is_initialized() { @@ -1679,6 +1717,9 @@ impl BeaconState { if config.tree_hash_cache { *res.tree_hash_cache_mut() = self.tree_hash_cache().clone(); } + if config.progressive_balances_cache { + *res.progressive_balances_cache_mut() = self.progressive_balances_cache().clone(); + } res } diff --git a/consensus/types/src/beacon_state/balance.rs b/consensus/types/src/beacon_state/balance.rs new file mode 100644 index 0000000000..e537a5b984 --- /dev/null +++ b/consensus/types/src/beacon_state/balance.rs @@ -0,0 +1,33 @@ +use arbitrary::Arbitrary; +use safe_arith::{ArithError, SafeArith}; + +/// A balance which will never be below the specified `minimum`. +/// +/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected. +#[derive(PartialEq, Debug, Clone, Copy, Arbitrary)] +pub struct Balance { + raw: u64, + minimum: u64, +} + +impl Balance { + /// Initialize the balance to `0`, or the given `minimum`. 
+ pub fn zero(minimum: u64) -> Self { + Self { raw: 0, minimum } + } + + /// Returns the balance with respect to the initialization `minimum`. + pub fn get(&self) -> u64 { + std::cmp::max(self.raw, self.minimum) + } + + /// Add-assign to the balance. + pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> { + self.raw.safe_add_assign(other) + } + + /// Sub-assign to the balance. + pub fn safe_sub_assign(&mut self, other: u64) -> Result<(), ArithError> { + self.raw.safe_sub_assign(other) + } +} diff --git a/consensus/types/src/beacon_state/clone_config.rs b/consensus/types/src/beacon_state/clone_config.rs index e5f050aee6..c6e7f47421 100644 --- a/consensus/types/src/beacon_state/clone_config.rs +++ b/consensus/types/src/beacon_state/clone_config.rs @@ -5,6 +5,7 @@ pub struct CloneConfig { pub pubkey_cache: bool, pub exit_cache: bool, pub tree_hash_cache: bool, + pub progressive_balances_cache: bool, } impl CloneConfig { @@ -14,6 +15,7 @@ impl CloneConfig { pubkey_cache: true, exit_cache: true, tree_hash_cache: true, + progressive_balances_cache: true, } } diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs new file mode 100644 index 0000000000..9f5c223d57 --- /dev/null +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -0,0 +1,184 @@ +use crate::beacon_state::balance::Balance; +use crate::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec}; +use arbitrary::Arbitrary; +use safe_arith::SafeArith; +use serde_derive::{Deserialize, Serialize}; +use strum::{Display, EnumString, EnumVariantNames}; + +/// This cache keeps track of the accumulated target attestation balance for the current & previous +/// epochs. The cached values can be utilised by fork choice to calculate unrealized justification +/// and finalization instead of converting epoch participation arrays to balances for each block we +/// process. 
+#[derive(Default, Debug, PartialEq, Arbitrary, Clone)] +pub struct ProgressiveBalancesCache { + inner: Option, +} + +#[derive(Debug, PartialEq, Arbitrary, Clone)] +struct Inner { + pub current_epoch: Epoch, + pub previous_epoch_target_attesting_balance: Balance, + pub current_epoch_target_attesting_balance: Balance, +} + +impl ProgressiveBalancesCache { + pub fn initialize( + &mut self, + current_epoch: Epoch, + previous_epoch_target_attesting_balance: Balance, + current_epoch_target_attesting_balance: Balance, + ) { + self.inner = Some(Inner { + current_epoch, + previous_epoch_target_attesting_balance, + current_epoch_target_attesting_balance, + }); + } + + pub fn is_initialized(&self) -> bool { + self.inner.is_some() + } + + /// When a new target attestation has been processed, we update the cached + /// `current_epoch_target_attesting_balance` to include the validator effective balance. + /// If the epoch is neither the current epoch nor the previous epoch, an error is returned. + pub fn on_new_target_attestation( + &mut self, + epoch: Epoch, + validator_effective_balance: u64, + ) -> Result<(), BeaconStateError> { + let cache = self.get_inner_mut()?; + + if epoch == cache.current_epoch { + cache + .current_epoch_target_attesting_balance + .safe_add_assign(validator_effective_balance)?; + } else if epoch.safe_add(1)? == cache.current_epoch { + cache + .previous_epoch_target_attesting_balance + .safe_add_assign(validator_effective_balance)?; + } else { + return Err(BeaconStateError::ProgressiveBalancesCacheInconsistent); + } + + Ok(()) + } + + /// When a validator is slashed, we reduce the `current_epoch_target_attesting_balance` by the + /// validator's effective balance to exclude the validator weight. 
+ pub fn on_slashing( + &mut self, + is_previous_epoch_target_attester: bool, + is_current_epoch_target_attester: bool, + effective_balance: u64, + ) -> Result<(), BeaconStateError> { + let cache = self.get_inner_mut()?; + if is_previous_epoch_target_attester { + cache + .previous_epoch_target_attesting_balance + .safe_sub_assign(effective_balance)?; + } + if is_current_epoch_target_attester { + cache + .current_epoch_target_attesting_balance + .safe_sub_assign(effective_balance)?; + } + Ok(()) + } + + /// When a current epoch target attester has its effective balance changed, we adjust the + /// its share of the target attesting balance in the cache. + pub fn on_effective_balance_change( + &mut self, + is_current_epoch_target_attester: bool, + old_effective_balance: u64, + new_effective_balance: u64, + ) -> Result<(), BeaconStateError> { + let cache = self.get_inner_mut()?; + if is_current_epoch_target_attester { + if new_effective_balance > old_effective_balance { + cache + .current_epoch_target_attesting_balance + .safe_add_assign(new_effective_balance.safe_sub(old_effective_balance)?)?; + } else { + cache + .current_epoch_target_attesting_balance + .safe_sub_assign(old_effective_balance.safe_sub(new_effective_balance)?)?; + } + } + Ok(()) + } + + /// On epoch transition, the balance from current epoch is shifted to previous epoch, and the + /// current epoch balance is reset to 0. + pub fn on_epoch_transition(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { + let cache = self.get_inner_mut()?; + cache.current_epoch.safe_add_assign(1)?; + cache.previous_epoch_target_attesting_balance = + cache.current_epoch_target_attesting_balance; + cache.current_epoch_target_attesting_balance = + Balance::zero(spec.effective_balance_increment); + Ok(()) + } + + pub fn previous_epoch_target_attesting_balance(&self) -> Result { + Ok(self + .get_inner()? 
+ .previous_epoch_target_attesting_balance + .get()) + } + + pub fn current_epoch_target_attesting_balance(&self) -> Result { + Ok(self + .get_inner()? + .current_epoch_target_attesting_balance + .get()) + } + + fn get_inner_mut(&mut self) -> Result<&mut Inner, BeaconStateError> { + self.inner + .as_mut() + .ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized) + } + + fn get_inner(&self) -> Result<&Inner, BeaconStateError> { + self.inner + .as_ref() + .ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized) + } +} + +#[derive( + Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize, Display, EnumString, EnumVariantNames, +)] +#[strum(serialize_all = "lowercase")] +pub enum ProgressiveBalancesMode { + /// Disable the usage of progressive cache, and use the existing `ParticipationCache` calculation. + Disabled, + /// Enable the usage of progressive cache, with checks against the `ParticipationCache` and falls + /// back to the existing calculation if there is a balance mismatch. + Checked, + /// Enable the usage of progressive cache, with checks against the `ParticipationCache`. Errors + /// if there is a balance mismatch. Used in testing only. + Strict, + /// Enable the usage of progressive cache, with no comparative checks against the + /// `ParticipationCache`. This is fast but an experimental mode, use with caution. + Fast, +} + +impl ProgressiveBalancesMode { + pub fn perform_comparative_checks(&self) -> bool { + match self { + Self::Disabled | Self::Fast => false, + Self::Checked | Self::Strict => true, + } + } +} + +/// `ProgressiveBalancesCache` is only enabled from `Altair` as it requires `ParticipationCache`. 
+pub fn is_progressive_balances_enabled(state: &BeaconState) -> bool { + match state { + BeaconState::Base(_) => false, + BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => true, + } +} diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index d63eaafc4b..6cd9c1dbf8 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -219,17 +219,18 @@ async fn clone_config() { let mut state = build_state::(16).await; - state.build_all_caches(&spec).unwrap(); + state.build_caches(&spec).unwrap(); state .update_tree_hash_cache() .expect("should update tree hash cache"); - let num_caches = 4; + let num_caches = 5; let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig { committee_caches: (i & 1) != 0, pubkey_cache: ((i >> 1) & 1) != 0, exit_cache: ((i >> 2) & 1) != 0, tree_hash_cache: ((i >> 3) & 1) != 0, + progressive_balances_cache: ((i >> 4) & 1) != 0, }); for config in all_configs { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 163b07dcd1..5957182230 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -168,11 +168,9 @@ pub struct ChainSpec { pub maximum_gossip_clock_disparity_millis: u64, pub target_aggregators_per_committee: u64, pub attestation_subnet_count: u64, - pub random_subnets_per_validator: u64, - pub epochs_per_random_subnet_subscription: u64, pub subnets_per_node: u8, pub epochs_per_subnet_subscription: u64, - attestation_subnet_extra_bits: u8, + pub attestation_subnet_extra_bits: u8, /* * Application params @@ -455,17 +453,7 @@ impl ChainSpec { #[allow(clippy::integer_arithmetic)] pub const fn attestation_subnet_prefix_bits(&self) -> u32 { - // maybe use log2 when stable https://github.com/rust-lang/rust/issues/70887 - - // NOTE: this line is here simply to guarantee that if self.attestation_subnet_count type - // is changed, a compiler warning will be 
raised. This code depends on the type being u64. - let attestation_subnet_count: u64 = self.attestation_subnet_count; - let attestation_subnet_count_bits = if attestation_subnet_count == 0 { - 0 - } else { - 63 - attestation_subnet_count.leading_zeros() - }; - + let attestation_subnet_count_bits = self.attestation_subnet_count.ilog2(); self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits } @@ -625,13 +613,11 @@ impl ChainSpec { network_id: 1, // mainnet network id attestation_propagation_slot_range: 32, attestation_subnet_count: 64, - random_subnets_per_validator: 1, - subnets_per_node: 1, + subnets_per_node: 2, maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, - epochs_per_random_subnet_subscription: 256, epochs_per_subnet_subscription: 256, - attestation_subnet_extra_bits: 6, + attestation_subnet_extra_bits: 0, /* * Application specific @@ -842,8 +828,8 @@ impl ChainSpec { * Capella hard fork params */ capella_fork_version: [0x03, 0x00, 0x00, 0x64], - capella_fork_epoch: None, - max_validators_per_withdrawals_sweep: 16384, + capella_fork_epoch: Some(Epoch::new(648704)), + max_validators_per_withdrawals_sweep: 8192, /* * Network specific @@ -852,13 +838,11 @@ impl ChainSpec { network_id: 100, // Gnosis Chain network id attestation_propagation_slot_range: 32, attestation_subnet_count: 64, - random_subnets_per_validator: 1, - subnets_per_node: 1, + subnets_per_node: 4, // Make this larger than usual to avoid network damage maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, - epochs_per_random_subnet_subscription: 256, epochs_per_subnet_subscription: 256, - attestation_subnet_extra_bits: 6, + attestation_subnet_extra_bits: 0, /* * Application specific @@ -946,6 +930,9 @@ pub struct Config { shard_committee_period: u64, #[serde(with = "serde_utils::quoted_u64")] eth1_follow_distance: u64, + #[serde(default = "default_subnets_per_node")] + #[serde(with = "serde_utils::quoted_u8")] + 
subnets_per_node: u8, #[serde(with = "serde_utils::quoted_u64")] inactivity_score_bias: u64, @@ -1002,6 +989,10 @@ fn default_safe_slots_to_import_optimistically() -> u64 { 128u64 } +fn default_subnets_per_node() -> u8 { + 2u8 +} + impl Default for Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); @@ -1084,6 +1075,7 @@ impl Config { min_validator_withdrawability_delay: spec.min_validator_withdrawability_delay, shard_committee_period: spec.shard_committee_period, eth1_follow_distance: spec.eth1_follow_distance, + subnets_per_node: spec.subnets_per_node, inactivity_score_bias: spec.inactivity_score_bias, inactivity_score_recovery_rate: spec.inactivity_score_recovery_rate, @@ -1130,6 +1122,7 @@ impl Config { min_validator_withdrawability_delay, shard_committee_period, eth1_follow_distance, + subnets_per_node, inactivity_score_bias, inactivity_score_recovery_rate, ejection_balance, @@ -1162,6 +1155,7 @@ impl Config { min_validator_withdrawability_delay, shard_committee_period, eth1_follow_distance, + subnets_per_node, inactivity_score_bias, inactivity_score_recovery_rate, ejection_balance, diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index b10ad7557b..01f86d3480 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -86,10 +86,6 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "domain_application_mask".to_uppercase()=> u32_hex(spec.domain_application_mask), "target_aggregators_per_committee".to_uppercase() => spec.target_aggregators_per_committee.to_string().into(), - "random_subnets_per_validator".to_uppercase() => - spec.random_subnets_per_validator.to_string().into(), - "epochs_per_random_subnet_subscription".to_uppercase() => - spec.epochs_per_random_subnet_subscription.to_string().into(), "domain_contribution_and_proof".to_uppercase() => u32_hex(spec.domain_contribution_and_proof), "domain_sync_committee".to_uppercase() => 
u32_hex(spec.domain_sync_committee), diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index aea4677f26..12e81d0028 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -30,8 +30,10 @@ impl From<&DepositTreeSnapshot> for FinalizedExecutionBlock { pub struct DepositTreeSnapshot { pub finalized: Vec, pub deposit_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub deposit_count: u64, pub execution_block_hash: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub execution_block_height: u64, } diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 378e8d34b7..64bfb8da0b 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -373,7 +373,7 @@ impl EthSpec for GnosisEthSpec { type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch type MaxBlsToExecutionChanges = U16; - type MaxWithdrawalsPerPayload = U16; + type MaxWithdrawalsPerPayload = U8; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 007d4c4daa..85144a6137 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -24,6 +24,11 @@ impl ForkName { ] } + pub fn latest() -> ForkName { + // This unwrap is safe as long as we have 1+ forks. It is tested below. + *ForkName::list_all().last().unwrap() + } + /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` /// is the only fork in effect from genesis. 
pub fn make_genesis_spec(&self, mut spec: ChainSpec) -> ChainSpec { @@ -178,7 +183,7 @@ mod test { #[test] fn previous_and_next_fork_consistent() { - assert_eq!(ForkName::Capella.next_fork(), None); + assert_eq!(ForkName::latest().next_fork(), None); assert_eq!(ForkName::Base.previous_fork(), None); for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() { @@ -211,4 +216,15 @@ mod test { assert_eq!(ForkName::from_str("merge"), Ok(ForkName::Merge)); assert_eq!(ForkName::Merge.to_string(), "bellatrix"); } + + #[test] + fn fork_name_latest() { + assert_eq!(ForkName::latest(), *ForkName::list_all().last().unwrap()); + + let mut fork = ForkName::Base; + while let Some(next_fork) = fork.next_fork() { + fork = next_fork; + } + assert_eq!(ForkName::latest(), fork); + } } diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index b885f89f7d..6793fe5574 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -80,15 +80,26 @@ impl SubnetId { epoch: Epoch, spec: &ChainSpec, ) -> Result<(impl Iterator, Epoch), &'static str> { + // Simplify the variable name + let subscription_duration = spec.epochs_per_subnet_subscription; + let node_id_prefix = (node_id >> (256 - spec.attestation_subnet_prefix_bits() as usize)).as_usize(); - let subscription_event_idx = epoch.as_u64() / spec.epochs_per_subnet_subscription; + // NOTE: The as_u64() panics if the number is larger than u64::max_value(). This cannot be + // true as spec.epochs_per_subnet_subscription is a u64. 
+ let node_offset = (node_id % ethereum_types::U256::from(subscription_duration)).as_u64(); + + // Calculate at which epoch this node needs to re-evaluate + let valid_until_epoch = epoch.as_u64() + + subscription_duration + .saturating_sub((epoch.as_u64() + node_offset) % subscription_duration); + + let subscription_event_idx = (epoch.as_u64() + node_offset) / subscription_duration; let permutation_seed = ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); let num_subnets = 1 << spec.attestation_subnet_prefix_bits(); - let permutated_prefix = compute_shuffled_index( node_id_prefix, num_subnets, @@ -107,7 +118,6 @@ impl SubnetId { let subnet_set_generator = (0..subnets_per_node).map(move |idx| { SubnetId::new((permutated_prefix + idx as u64) % attestation_subnet_count) }); - let valid_until_epoch = (subscription_event_idx + 1) * spec.epochs_per_subnet_subscription; Ok((subnet_set_generator, valid_until_epoch.into())) } } @@ -149,3 +159,80 @@ impl AsRef for SubnetId { subnet_id_to_string(self.0) } } + +#[cfg(test)] +mod tests { + use super::*; + + /// A set of tests compared to the python specification + #[test] + fn compute_subnets_for_epoch_unit_test() { + // Randomized variables used generated with the python specification + let node_ids = [ + "0", + "88752428858350697756262172400162263450541348766581994718383409852729519486397", + "18732750322395381632951253735273868184515463718109267674920115648614659369468", + "27726842142488109545414954493849224833670205008410190955613662332153332462900", + "39755236029158558527862903296867805548949739810920318269566095185775868999998", + "31899136003441886988955119620035330314647133604576220223892254902004850516297", + "58579998103852084482416614330746509727562027284701078483890722833654510444626", + "28248042035542126088870192155378394518950310811868093527036637864276176517397", + "60930578857433095740782970114409273483106482059893286066493409689627770333527", + 
"103822458477361691467064888613019442068586830412598673713899771287914656699997", + ] + .into_iter() + .map(|v| ethereum_types::U256::from_dec_str(v).unwrap()) + .collect::>(); + + let epochs = [ + 54321u64, 1017090249, 1827566880, 846255942, 766597383, 1204990115, 1616209495, + 1774367616, 1484598751, 3525502229, + ] + .into_iter() + .map(Epoch::from) + .collect::>(); + + // Test mainnet + let spec = ChainSpec::mainnet(); + + // Calculated by hand + let expected_valid_time: Vec = [ + 54528, 1017090371, 1827567108, 846256076, 766597570, 1204990135, 1616209582, + 1774367723, 1484598953, 3525502371, + ] + .into(); + + // Calculated from pyspec + let expected_subnets = vec![ + vec![4u64, 5u64], + vec![61, 62], + vec![23, 24], + vec![38, 39], + vec![53, 54], + vec![39, 40], + vec![48, 49], + vec![39, 40], + vec![34, 35], + vec![37, 38], + ]; + + for x in 0..node_ids.len() { + println!("Test: {}", x); + println!( + "NodeId: {}\n Epoch: {}\n, expected_update_time: {}\n, expected_subnets: {:?}", + node_ids[x], epochs[x], expected_valid_time[x], expected_subnets[x] + ); + + let (computed_subnets, valid_time) = SubnetId::compute_subnets_for_epoch::< + crate::MainnetEthSpec, + >(node_ids[x], epochs[x], &spec) + .unwrap(); + + assert_eq!(Epoch::from(expected_valid_time[x]), valid_time); + assert_eq!( + expected_subnets[x], + computed_subnets.map(SubnetId::into).collect::>() + ); + } + } +} diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 33accfc057..ce0b094b77 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -6,6 +6,9 @@ use beacon_node::{get_data_dir, get_slots_per_restore_point, ClientConfig}; use clap::{App, Arg, ArgMatches}; use environment::{Environment, RuntimeContext}; use slog::{info, Logger}; +use std::fs; +use std::io::Write; +use std::path::PathBuf; use store::{ errors::Error, metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}, @@ -57,6 +60,13 @@ pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> { 
.default_value("sizes") .possible_values(InspectTarget::VARIANTS), ) + .arg( + Arg::with_name("output-dir") + .long("output-dir") + .value_name("DIR") + .help("Base directory for the output files. Defaults to the current directory") + .takes_value(true), + ) } pub fn prune_payloads_app<'a, 'b>() -> App<'a, 'b> { @@ -154,18 +164,27 @@ pub enum InspectTarget { ValueSizes, #[strum(serialize = "total")] ValueTotal, + #[strum(serialize = "values")] + Values, } pub struct InspectConfig { column: DBColumn, target: InspectTarget, + /// Configures where the inspect output should be stored. + output_dir: PathBuf, } fn parse_inspect_config(cli_args: &ArgMatches) -> Result { let column = clap_utils::parse_required(cli_args, "column")?; let target = clap_utils::parse_required(cli_args, "output")?; - - Ok(InspectConfig { column, target }) + let output_dir: PathBuf = + clap_utils::parse_optional(cli_args, "output-dir")?.unwrap_or_else(PathBuf::new); + Ok(InspectConfig { + column, + target, + output_dir, + }) } pub fn inspect_db( @@ -173,7 +192,7 @@ pub fn inspect_db( client_config: ClientConfig, runtime_context: &RuntimeContext, log: Logger, -) -> Result<(), Error> { +) -> Result<(), String> { let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); @@ -185,12 +204,19 @@ pub fn inspect_db( client_config.store, spec, log, - )?; + ) + .map_err(|e| format!("{:?}", e))?; let mut total = 0; + let base_path = &inspect_config.output_dir; + + if let InspectTarget::Values = inspect_config.target { + fs::create_dir_all(base_path) + .map_err(|e| format!("Unable to create import directory: {:?}", e))?; + } for res in db.hot_db.iter_column(inspect_config.column) { - let (key, value) = res?; + let (key, value) = res.map_err(|e| format!("{:?}", e))?; match inspect_config.target { InspectTarget::ValueSizes => { @@ -200,11 +226,32 @@ pub fn inspect_db( InspectTarget::ValueTotal => { total += value.len(); 
} + InspectTarget::Values => { + let file_path = + base_path.join(format!("{}_{}.ssz", inspect_config.column.as_str(), key)); + + let write_result = fs::OpenOptions::new() + .create(true) + .write(true) + .open(&file_path) + .map_err(|e| format!("Failed to open file: {:?}", e)) + .map(|mut file| { + file.write_all(&value) + .map_err(|e| format!("Failed to write file: {:?}", e)) + }); + if let Err(e) = write_result { + println!("Error writing values to file {:?}: {:?}", file_path, e); + } else { + println!("Successfully saved values to file: {:?}", file_path); + } + + total += value.len(); + } } } match inspect_config.target { - InspectTarget::ValueSizes | InspectTarget::ValueTotal => { + InspectTarget::ValueSizes | InspectTarget::ValueTotal | InspectTarget::Values => { println!("Total: {} bytes", total); } } @@ -292,21 +339,23 @@ pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result let client_config = parse_client_config(cli_args, &env)?; let context = env.core_context(); let log = context.log().clone(); + let format_err = |e| format!("Fatal error: {:?}", e); match cli_args.subcommand() { - ("version", Some(_)) => display_db_version(client_config, &context, log), + ("version", Some(_)) => { + display_db_version(client_config, &context, log).map_err(format_err) + } ("migrate", Some(cli_args)) => { let migrate_config = parse_migrate_config(cli_args)?; - migrate_db(migrate_config, client_config, &context, log) + migrate_db(migrate_config, client_config, &context, log).map_err(format_err) } ("inspect", Some(cli_args)) => { let inspect_config = parse_inspect_config(cli_args)?; inspect_db(inspect_config, client_config, &context, log) } - ("prune_payloads", Some(_)) => prune_payloads(client_config, &context, log), - _ => { - return Err("Unknown subcommand, for help `lighthouse database_manager --help`".into()) + ("prune_payloads", Some(_)) => { + prune_payloads(client_config, &context, log).map_err(format_err) } + _ => Err("Unknown subcommand, for help 
`lighthouse database_manager --help`".into()), } - .map_err(|e| format!("Fatal error: {:?}", e)) } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 9e7f2fdb08..f9d0a6a31c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.1.0" +version = "4.3.0" authors = ["Paul Hauner "] edition = "2021" @@ -21,6 +21,7 @@ env_logger = "0.9.0" types = { path = "../consensus/types" } state_processing = { path = "../consensus/state_processing" } int_to_bytes = { path = "../consensus/int_to_bytes" } +ethereum_hashing = "1.0.0-beta.2" ethereum_ssz = "0.5.0" environment = { path = "../lighthouse/environment" } eth2_network_config = { path = "../common/eth2_network_config" } @@ -34,7 +35,6 @@ lighthouse_version = { path = "../common/lighthouse_version" } directory = { path = "../common/directory" } account_utils = { path = "../common/account_utils" } eth2_wallet = { path = "../crypto/eth2_wallet" } -web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } eth1_test_rig = { path = "../testing/eth1_test_rig" } sensitive_url = { path = "../common/sensitive_url" } eth2 = { path = "../common/eth2" } @@ -42,6 +42,7 @@ snap = "1.0.1" beacon_chain = { path = "../beacon_node/beacon_chain" } store = { path = "../beacon_node/store" } malloc_utils = { path = "../common/malloc_utils" } +rayon = "1.7.0" [package.metadata.cargo-udeps.ignore] normal = ["malloc_utils"] diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs index 1128eb52ab..8919ebdaf5 100644 --- a/lcli/src/deploy_deposit_contract.rs +++ b/lcli/src/deploy_deposit_contract.rs @@ -2,19 +2,18 @@ use clap::ArgMatches; use environment::Environment; use types::EthSpec; -use web3::{transports::Http, Web3}; +use eth1_test_rig::{Http, Provider}; pub fn run(env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { let eth1_http: String = 
clap_utils::parse_required(matches, "eth1-http")?; let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?; let validator_count: Option = clap_utils::parse_optional(matches, "validator-count")?; - let transport = - Http::new(ð1_http).map_err(|e| format!("Unable to connect to eth1 HTTP: {:?}", e))?; - let web3 = Web3::new(transport); + let client = Provider::::try_from(ð1_http) + .map_err(|e| format!("Unable to connect to eth1 HTTP: {:?}", e))?; env.runtime().block_on(async { - let contract = eth1_test_rig::DepositContract::deploy(web3, confirmations, None) + let contract = eth1_test_rig::DepositContract::deploy(client, confirmations, None) .await .map_err(|e| format!("Failed to deploy deposit contract: {:?}", e))?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index eeb098f04d..d072beaa4e 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -10,6 +10,7 @@ mod generate_bootnode_enr; mod indexed_attestations; mod insecure_validators; mod interop_genesis; +mod mnemonic_validators; mod new_testnet; mod parse_ssz; mod replace_state_pubkeys; @@ -449,6 +450,22 @@ fn main() { "If present, a interop-style genesis.ssz file will be generated.", ), ) + .arg( + Arg::with_name("derived-genesis-state") + .long("derived-genesis-state") + .takes_value(false) + .help( + "If present, a genesis.ssz file will be generated with keys generated from a given mnemonic.", + ), + ) + .arg( + Arg::with_name("mnemonic-phrase") + .long("mnemonic-phrase") + .value_name("MNEMONIC_PHRASE") + .takes_value(true) + .requires("derived-genesis-state") + .help("The mnemonic with which we generate the validator keys for a derived genesis state"), + ) .arg( Arg::with_name("min-genesis-time") .long("min-genesis-time") @@ -568,14 +585,32 @@ fn main() { ), ) .arg( - Arg::with_name("merge-fork-epoch") - .long("merge-fork-epoch") + Arg::with_name("bellatrix-fork-epoch") + .long("bellatrix-fork-epoch") .value_name("EPOCH") .takes_value(true) .help( "The epoch at which to enable the 
Merge hard fork", ), ) + .arg( + Arg::with_name("capella-fork-epoch") + .long("capella-fork-epoch") + .value_name("EPOCH") + .takes_value(true) + .help( + "The epoch at which to enable the Capella hard fork", + ), + ) + .arg( + Arg::with_name("ttd") + .long("ttd") + .value_name("TTD") + .takes_value(true) + .help( + "The terminal total difficulty", + ), + ) .arg( Arg::with_name("eth1-block-hash") .long("eth1-block-hash") @@ -695,6 +730,7 @@ fn main() { .long("count") .value_name("COUNT") .takes_value(true) + .required(true) .help("Produces validators in the range of 0..count."), ) .arg( @@ -702,6 +738,7 @@ fn main() { .long("base-dir") .value_name("BASE_DIR") .takes_value(true) + .required(true) .help("The base directory where validator keypairs and secrets are stored"), ) .arg( @@ -712,6 +749,43 @@ fn main() { .help("The number of nodes to divide the validator keys to"), ) ) + .subcommand( + SubCommand::with_name("mnemonic-validators") + .about("Produces validator directories by deriving the keys from \ + a mnemonic. 
For testing purposes only, DO NOT USE IN \ + PRODUCTION!") + .arg( + Arg::with_name("count") + .long("count") + .value_name("COUNT") + .takes_value(true) + .required(true) + .help("Produces validators in the range of 0..count."), + ) + .arg( + Arg::with_name("base-dir") + .long("base-dir") + .value_name("BASE_DIR") + .takes_value(true) + .required(true) + .help("The base directory where validator keypairs and secrets are stored"), + ) + .arg( + Arg::with_name("node-count") + .long("node-count") + .value_name("NODE_COUNT") + .takes_value(true) + .help("The number of nodes to divide the validator keys to"), + ) + .arg( + Arg::with_name("mnemonic-phrase") + .long("mnemonic-phrase") + .value_name("MNEMONIC_PHRASE") + .takes_value(true) + .required(true) + .help("The mnemonic with which we generate the validator keys"), + ) + ) .subcommand( SubCommand::with_name("indexed-attestations") .about("Convert attestations to indexed form, using the committees from a state.") @@ -807,6 +881,7 @@ fn run( max_log_number: 0, compression: false, is_restricted: true, + sse_logging: false, // No SSE Logging in LCLI }) .map_err(|e| format!("should start logger: {:?}", e))? 
.build() @@ -853,6 +928,8 @@ fn run( .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), ("insecure-validators", Some(matches)) => insecure_validators::run(matches) .map_err(|e| format!("Failed to run insecure-validators command: {}", e)), + ("mnemonic-validators", Some(matches)) => mnemonic_validators::run(matches) + .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), ("indexed-attestations", Some(matches)) => indexed_attestations::run::(matches) .map_err(|e| format!("Failed to run indexed-attestations command: {}", e)), ("block-root", Some(matches)) => block_root::run::(env, matches) diff --git a/lcli/src/mnemonic_validators.rs b/lcli/src/mnemonic_validators.rs new file mode 100644 index 0000000000..2653aee149 --- /dev/null +++ b/lcli/src/mnemonic_validators.rs @@ -0,0 +1,104 @@ +use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; +use account_utils::random_password; +use clap::ArgMatches; +use eth2_wallet::bip39::Seed; +use eth2_wallet::bip39::{Language, Mnemonic}; +use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; +use rayon::prelude::*; +use std::fs; +use std::path::PathBuf; +use validator_dir::Builder as ValidatorBuilder; + +/// Generates validator directories with keys derived from the given mnemonic. 
+pub fn generate_validator_dirs( + indices: &[usize], + mnemonic_phrase: &str, + validators_dir: PathBuf, + secrets_dir: PathBuf, +) -> Result<(), String> { + if !validators_dir.exists() { + fs::create_dir_all(&validators_dir) + .map_err(|e| format!("Unable to create validators dir: {:?}", e))?; + } + + if !secrets_dir.exists() { + fs::create_dir_all(&secrets_dir) + .map_err(|e| format!("Unable to create secrets dir: {:?}", e))?; + } + let mnemonic = Mnemonic::from_phrase(mnemonic_phrase, Language::English).map_err(|e| { + format!( + "Unable to derive mnemonic from string {:?}: {:?}", + mnemonic_phrase, e + ) + })?; + + let seed = Seed::new(&mnemonic, ""); + + let _: Vec<_> = indices + .par_iter() + .map(|index| { + let voting_password = random_password(); + + let derive = |key_type: KeyType, password: &[u8]| -> Result { + let (secret, path) = recover_validator_secret_from_mnemonic( + seed.as_bytes(), + *index as u32, + key_type, + ) + .map_err(|e| format!("Unable to recover validator keys: {:?}", e))?; + + let keypair = keypair_from_secret(secret.as_bytes()) + .map_err(|e| format!("Unable build keystore: {:?}", e))?; + + KeystoreBuilder::new(&keypair, password, format!("{}", path)) + .map_err(|e| format!("Unable build keystore: {:?}", e))? 
+ .build() + .map_err(|e| format!("Unable build keystore: {:?}", e)) + }; + + let voting_keystore = derive(KeyType::Voting, voting_password.as_bytes()).unwrap(); + + println!("Validator {}", index + 1); + + ValidatorBuilder::new(validators_dir.clone()) + .password_dir(secrets_dir.clone()) + .store_withdrawal_keystore(false) + .voting_keystore(voting_keystore, voting_password.as_bytes()) + .build() + .map_err(|e| format!("Unable to build validator: {:?}", e)) + .unwrap() + }) + .collect(); + + Ok(()) +} + +pub fn run(matches: &ArgMatches) -> Result<(), String> { + let validator_count: usize = clap_utils::parse_required(matches, "count")?; + let base_dir: PathBuf = clap_utils::parse_required(matches, "base-dir")?; + let node_count: Option = clap_utils::parse_optional(matches, "node-count")?; + let mnemonic_phrase: String = clap_utils::parse_required(matches, "mnemonic-phrase")?; + if let Some(node_count) = node_count { + let validators_per_node = validator_count / node_count; + let validator_range = (0..validator_count).collect::>(); + let indices_range = validator_range + .chunks(validators_per_node) + .collect::>(); + + for (i, indices) in indices_range.iter().enumerate() { + let validators_dir = base_dir.join(format!("node_{}", i + 1)).join("validators"); + let secrets_dir = base_dir.join(format!("node_{}", i + 1)).join("secrets"); + generate_validator_dirs(indices, &mnemonic_phrase, validators_dir, secrets_dir)?; + } + } else { + let validators_dir = base_dir.join("validators"); + let secrets_dir = base_dir.join("secrets"); + generate_validator_dirs( + (0..validator_count).collect::>().as_slice(), + &mnemonic_phrase, + validators_dir, + secrets_dir, + )?; + } + Ok(()) +} diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 5af22731f3..01a44cabef 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,16 +1,26 @@ +use account_utils::eth2_keystore::keypair_from_secret; use clap::ArgMatches; use clap_utils::{parse_optional, 
parse_required, parse_ssz_optional}; use eth2_network_config::Eth2NetworkConfig; -use genesis::interop_genesis_state; +use eth2_wallet::bip39::Seed; +use eth2_wallet::bip39::{Language, Mnemonic}; +use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; +use ethereum_hashing::hash; use ssz::Decode; use ssz::Encode; +use state_processing::process_activations; +use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use std::fs::File; use std::io::Read; use std::path::PathBuf; +use std::str::FromStr; use std::time::{SystemTime, UNIX_EPOCH}; +use types::ExecutionBlockHash; use types::{ - test_utils::generate_deterministic_keypairs, Address, Config, Epoch, EthSpec, - ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, ForkName, + test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, + Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRefMut, ForkName, Hash256, Keypair, + PublicKey, Validator, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -67,63 +77,69 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul spec.altair_fork_epoch = Some(fork_epoch); } - if let Some(fork_epoch) = parse_optional(matches, "merge-fork-epoch")? { + if let Some(fork_epoch) = parse_optional(matches, "bellatrix-fork-epoch")? { spec.bellatrix_fork_epoch = Some(fork_epoch); } - let genesis_state_bytes = if matches.is_present("interop-genesis-state") { - let execution_payload_header: Option> = - parse_optional(matches, "execution-payload-header")? 
- .map(|filename: String| { - let mut bytes = vec![]; - let mut file = File::open(filename.as_str()) - .map_err(|e| format!("Unable to open {}: {}", filename, e))?; - file.read_to_end(&mut bytes) - .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); - match fork_name { - ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( - "genesis fork must be post-merge".to_string(), - )), - ForkName::Merge => { - ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Merge) - } - ForkName::Capella => { - ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Capella) - } + if let Some(fork_epoch) = parse_optional(matches, "capella-fork-epoch")? { + spec.capella_fork_epoch = Some(fork_epoch); + } + + if let Some(ttd) = parse_optional(matches, "ttd")? { + spec.terminal_total_difficulty = ttd; + } + + let validator_count = parse_required(matches, "validator-count")?; + let execution_payload_header: Option> = + parse_optional(matches, "execution-payload-header")? 
+ .map(|filename: String| { + let mut bytes = vec![]; + let mut file = File::open(filename.as_str()) + .map_err(|e| format!("Unable to open {}: {}", filename, e))?; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {}: {}", filename, e))?; + let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( + "genesis fork must be post-merge".to_string(), + )), + ForkName::Merge => { + ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Merge) } - .map_err(|e| format!("SSZ decode failed: {:?}", e)) - }) - .transpose()?; + ForkName::Capella => { + ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Capella) + } + } + .map_err(|e| format!("SSZ decode failed: {:?}", e)) + }) + .transpose()?; - let (eth1_block_hash, genesis_time) = if let Some(payload) = - execution_payload_header.as_ref() - { - let eth1_block_hash = - parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash()); - let genesis_time = - parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp()); - (eth1_block_hash, genesis_time) - } else { - let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { - "One of `--execution-payload-header` or `--eth1-block-hash` must be set".to_string() - })?; - let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to get time: {:?}", e))? 
- .as_secs(), - ); - (eth1_block_hash, genesis_time) - }; - - let validator_count = parse_required(matches, "validator-count")?; + let (eth1_block_hash, genesis_time) = if let Some(payload) = execution_payload_header.as_ref() { + let eth1_block_hash = + parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash()); + let genesis_time = + parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp()); + (eth1_block_hash, genesis_time) + } else { + let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { + "One of `--execution-payload-header` or `--eth1-block-hash` must be set".to_string() + })?; + let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to get time: {:?}", e))? + .as_secs(), + ); + (eth1_block_hash, genesis_time) + }; + let genesis_state_bytes = if matches.is_present("interop-genesis-state") { let keypairs = generate_deterministic_keypairs(validator_count); + let keypairs: Vec<_> = keypairs.into_iter().map(|kp| (kp.clone(), kp)).collect(); - let genesis_state = interop_genesis_state::( + let genesis_state = initialize_state_with_validators::( &keypairs, genesis_time, eth1_block_hash.into_root(), @@ -131,6 +147,41 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul &spec, )?; + Some(genesis_state.as_ssz_bytes()) + } else if matches.is_present("derived-genesis-state") { + let mnemonic_phrase: String = clap_utils::parse_required(matches, "mnemonic-phrase")?; + let mnemonic = Mnemonic::from_phrase(&mnemonic_phrase, Language::English).map_err(|e| { + format!( + "Unable to derive mnemonic from string {:?}: {:?}", + mnemonic_phrase, e + ) + })?; + let seed = Seed::new(&mnemonic, ""); + let keypairs = (0..validator_count as u32) + .map(|index| { + let (secret, _) = + recover_validator_secret_from_mnemonic(seed.as_bytes(), index, KeyType::Voting) + .unwrap(); + + let voting_keypair = 
keypair_from_secret(secret.as_bytes()).unwrap(); + + let (secret, _) = recover_validator_secret_from_mnemonic( + seed.as_bytes(), + index, + KeyType::Withdrawal, + ) + .unwrap(); + let withdrawal_keypair = keypair_from_secret(secret.as_bytes()).unwrap(); + (voting_keypair, withdrawal_keypair) + }) + .collect::>(); + let genesis_state = initialize_state_with_validators::( + &keypairs, + genesis_time, + eth1_block_hash.into_root(), + execution_payload_header, + &spec, + )?; Some(genesis_state.as_ssz_bytes()) } else { None @@ -145,3 +196,117 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul testnet.write_to_file(testnet_dir_path, overwrite_files) } + +/// Returns a `BeaconState` with the given validator keypairs embedded into the +/// genesis state. This allows us to start testnets without having to deposit validators +/// manually. +/// +/// The optional `execution_payload_header` allows us to start a network from the bellatrix +/// fork without the need to transition to altair and bellatrix. +/// +/// We need to ensure that `eth1_block_hash` is equal to the genesis block hash that is +/// generated from the execution side `genesis.json`. 
+fn initialize_state_with_validators( + keypairs: &[(Keypair, Keypair)], // Voting and Withdrawal keypairs + genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, + spec: &ChainSpec, +) -> Result, String> { + // If no header is provided, then start from a Bellatrix state by default + let default_header: ExecutionPayloadHeader = + ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + block_hash: ExecutionBlockHash::from_root(eth1_block_hash), + parent_hash: ExecutionBlockHash::zero(), + ..ExecutionPayloadHeaderMerge::default() + }); + let execution_payload_header = execution_payload_header.unwrap_or(default_header); + // Empty eth1 data + let eth1_data = Eth1Data { + block_hash: eth1_block_hash, + deposit_count: 0, + deposit_root: Hash256::from_str( + "0xd70a234731285c6804c2a4f56711ddb8c82c99740f207854891028af34e27e5e", + ) + .unwrap(), // empty deposit tree root + }; + let mut state = BeaconState::new(genesis_time, eth1_data, spec); + + // Seed RANDAO with Eth1 entropy + state.fill_randao_mixes_with(eth1_block_hash); + + for keypair in keypairs.iter() { + let withdrawal_credentials = |pubkey: &PublicKey| { + let mut credentials = hash(&pubkey.as_ssz_bytes()); + credentials[0] = spec.bls_withdrawal_prefix_byte; + Hash256::from_slice(&credentials) + }; + let amount = spec.max_effective_balance; + // Create a new validator. 
+ let validator = Validator { + pubkey: keypair.0.pk.clone().into(), + withdrawal_credentials: withdrawal_credentials(&keypair.1.pk), + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: std::cmp::min( + amount - amount % (spec.effective_balance_increment), + spec.max_effective_balance, + ), + slashed: false, + }; + state.validators_mut().push(validator).unwrap(); + state.balances_mut().push(amount).unwrap(); + } + + process_activations(&mut state, spec).unwrap(); + + if spec + .altair_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_altair(&mut state, spec).unwrap(); + + state.fork_mut().previous_version = spec.altair_fork_version; + } + + // Similarly, perform an upgrade to the merge if configured from genesis. + if spec + .bellatrix_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_bellatrix(&mut state, spec).unwrap(); + + // Remove intermediate Altair fork from `state.fork`. + state.fork_mut().previous_version = spec.bellatrix_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing + + // Currently, we only support starting from a bellatrix state + match state + .latest_execution_payload_header_mut() + .map_err(|e| format!("Failed to get execution payload header: {:?}", e))? 
+ { + ExecutionPayloadHeaderRefMut::Merge(header_mut) => { + if let ExecutionPayloadHeader::Merge(eph) = execution_payload_header { + *header_mut = eph; + } else { + return Err("Execution payload header must be a bellatrix header".to_string()); + } + } + ExecutionPayloadHeaderRefMut::Capella(_) => { + return Err("Cannot start genesis from a capella state".to_string()) + } + } + } + + // Now that we have our validators, initialize the caches (including the committees) + state.build_caches(spec).unwrap(); + + // Set genesis validators root for domain separation and chain versioning + *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap(); + + Ok(state) +} diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 49d1dd424d..e3b2a5acbf 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -109,7 +109,7 @@ pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), let target_slot = initial_slot + slots; state - .build_all_caches(spec) + .build_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; let state_root = if let Some(root) = cli_state_root.or(state_root) { diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index cf971c69f0..34a4560761 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -205,7 +205,7 @@ pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), if config.exclude_cache_builds { pre_state - .build_all_caches(spec) + .build_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; let state_root = pre_state .update_tree_hash_cache() @@ -303,7 +303,7 @@ fn do_transition( if !config.exclude_cache_builds { let t = Instant::now(); pre_state - .build_all_caches(spec) + .build_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; debug!("Build caches: {:?}", t.elapsed()); @@ -335,7 +335,7 @@ fn do_transition( let t = Instant::now(); pre_state - .build_all_caches(spec) + 
.build_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; debug!("Build all caches (again): {:?}", t.elapsed()); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 48f47c6d48..169aa67fdd 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "lighthouse" -version = "4.1.0" +version = "4.3.0" authors = ["Sigma Prime "] edition = "2021" autotests = false rust-version = "1.68.2" [features] -default = ["slasher-mdbx"] +default = ["slasher-lmdb"] # Writes debugging .ssz files to /tmp during block processing. write_ssz_files = ["beacon_node/write_ssz_files"] # Compiles the BLS crypto code so that the binary is portable across machines. @@ -55,7 +55,7 @@ malloc_utils = { path = "../common/malloc_utils" } directory = { path = "../common/directory" } unused_port = { path = "../common/unused_port" } database_manager = { path = "../database_manager" } -slasher = { path = "../slasher", default-features = false } +slasher = { path = "../slasher" } validator_manager = { path = "../validator_manager" } [dev-dependencies] diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 8ef67e82dd..53915b52d9 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -12,6 +12,7 @@ use eth2_network_config::Eth2NetworkConfig; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; +use logging::SSELoggingComponents; use serde_derive::{Deserialize, Serialize}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; @@ -36,6 +37,7 @@ use {futures::channel::oneshot, std::cell::RefCell}; pub use task_executor::test_utils::null_logger; const LOG_CHANNEL_SIZE: usize = 2048; +const SSE_LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. 
const MAXIMUM_SHUTDOWN_TIME: u64 = 15; @@ -57,6 +59,7 @@ pub struct LoggerConfig { pub max_log_number: usize, pub compression: bool, pub is_restricted: bool, + pub sse_logging: bool, } impl Default for LoggerConfig { fn default() -> Self { @@ -72,14 +75,54 @@ impl Default for LoggerConfig { max_log_number: 5, compression: false, is_restricted: true, + sse_logging: false, } } } +/// An execution context that can be used by a service. +/// +/// Distinct from an `Environment` because a `Context` is not able to give a mutable reference to a +/// `Runtime`, instead it only has access to a `Runtime`. +#[derive(Clone)] +pub struct RuntimeContext { + pub executor: TaskExecutor, + pub eth_spec_instance: E, + pub eth2_config: Eth2Config, + pub eth2_network_config: Option>, + pub sse_logging_components: Option, +} + +impl RuntimeContext { + /// Returns a sub-context of this context. + /// + /// The generated service will have the `service_name` in all it's logs. + pub fn service_context(&self, service_name: String) -> Self { + Self { + executor: self.executor.clone_with_name(service_name), + eth_spec_instance: self.eth_spec_instance.clone(), + eth2_config: self.eth2_config.clone(), + eth2_network_config: self.eth2_network_config.clone(), + sse_logging_components: self.sse_logging_components.clone(), + } + } + + /// Returns the `eth2_config` for this service. + pub fn eth2_config(&self) -> &Eth2Config { + &self.eth2_config + } + + /// Returns a reference to the logger for this service. + pub fn log(&self) -> &slog::Logger { + self.executor.log() + } +} + /// Builds an `Environment`. 
pub struct EnvironmentBuilder { runtime: Option>, log: Option, + sse_logging_components: Option, eth_spec_instance: E, eth2_config: Eth2Config, eth2_network_config: Option, @@ -91,6 +134,7 @@ impl EnvironmentBuilder { Self { runtime: None, log: None, + sse_logging_components: None, eth_spec_instance: MinimalEthSpec, eth2_config: Eth2Config::minimal(), eth2_network_config: None, @@ -104,6 +148,7 @@ impl EnvironmentBuilder { Self { runtime: None, log: None, + sse_logging_components: None, eth_spec_instance: MainnetEthSpec, eth2_config: Eth2Config::mainnet(), eth2_network_config: None, @@ -117,6 +162,7 @@ impl EnvironmentBuilder { Self { runtime: None, log: None, + sse_logging_components: None, eth_spec_instance: GnosisEthSpec, eth2_config: Eth2Config::gnosis(), eth2_network_config: None, @@ -265,7 +311,7 @@ impl EnvironmentBuilder { .build() .map_err(|e| format!("Unable to build file logger: {}", e))?; - let log = Logger::root(Duplicate::new(stdout_logger, file_logger).fuse(), o!()); + let mut log = Logger::root(Duplicate::new(stdout_logger, file_logger).fuse(), o!()); info!( log, @@ -273,6 +319,14 @@ impl EnvironmentBuilder { "path" => format!("{:?}", path) ); + // If the http API is enabled, we may need to send logs to be consumed by subscribers. + if config.sse_logging { + let sse_logger = SSELoggingComponents::new(SSE_LOG_CHANNEL_SIZE); + self.sse_logging_components = Some(sse_logger.clone()); + + log = Logger::root(Duplicate::new(log, sse_logger).fuse(), o!()); + } + self.log = Some(log); Ok(self) @@ -315,6 +369,7 @@ impl EnvironmentBuilder { signal: Some(signal), exit, log: self.log.ok_or("Cannot build environment without log")?, + sse_logging_components: self.sse_logging_components, eth_spec_instance: self.eth_spec_instance, eth2_config: self.eth2_config, eth2_network_config: self.eth2_network_config.map(Arc::new), @@ -322,42 +377,6 @@ impl EnvironmentBuilder { } } -/// An execution context that can be used by a service. 
-/// -/// Distinct from an `Environment` because a `Context` is not able to give a mutable reference to a -/// `Runtime`, instead it only has access to a `Runtime`. -#[derive(Clone)] -pub struct RuntimeContext { - pub executor: TaskExecutor, - pub eth_spec_instance: E, - pub eth2_config: Eth2Config, - pub eth2_network_config: Option>, -} - -impl RuntimeContext { - /// Returns a sub-context of this context. - /// - /// The generated service will have the `service_name` in all it's logs. - pub fn service_context(&self, service_name: String) -> Self { - Self { - executor: self.executor.clone_with_name(service_name), - eth_spec_instance: self.eth_spec_instance.clone(), - eth2_config: self.eth2_config.clone(), - eth2_network_config: self.eth2_network_config.clone(), - } - } - - /// Returns the `eth2_config` for this service. - pub fn eth2_config(&self) -> &Eth2Config { - &self.eth2_config - } - - /// Returns a reference to the logger for this service. - pub fn log(&self) -> &slog::Logger { - self.executor.log() - } -} - /// An environment where Lighthouse services can run. Used to start a production beacon node or /// validator client, or to run tests that involve logging and async task execution. 
pub struct Environment { @@ -369,6 +388,7 @@ pub struct Environment { signal: Option, exit: exit_future::Exit, log: Logger, + sse_logging_components: Option, eth_spec_instance: E, pub eth2_config: Eth2Config, pub eth2_network_config: Option>, @@ -395,6 +415,7 @@ impl Environment { eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), eth2_network_config: self.eth2_network_config.clone(), + sse_logging_components: self.sse_logging_components.clone(), } } @@ -410,6 +431,7 @@ impl Environment { eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), eth2_network_config: self.eth2_network_config.clone(), + sse_logging_components: self.sse_logging_components.clone(), } } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index c8b963a9bd..73e042342a 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -484,6 +484,16 @@ fn run( }; } + let sse_logging = { + if let Some(bn_matches) = matches.subcommand_matches("beacon_node") { + bn_matches.is_present("gui") + } else if let Some(vc_matches) = matches.subcommand_matches("validator_client") { + vc_matches.is_present("http") + } else { + false + } + }; + let logger_config = LoggerConfig { path: log_path, debug_level: String::from(debug_level), @@ -496,6 +506,7 @@ fn run( max_log_number: logfile_max_number, compression: logfile_compress, is_restricted: logfile_restricted, + sse_logging, }; let builder = environment_builder.initialize_logger(logger_config.clone())?; diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 696830a0d1..63d79fceb2 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -28,10 +28,6 @@ use tempfile::{tempdir, TempDir}; use types::{Keypair, PublicKey}; use validator_dir::ValidatorDir; -// TODO: create tests for the `lighthouse account validator deposit` command. 
This involves getting -// access to an IPC endpoint during testing or adding support for deposit submission via HTTP and -// using ganache. - /// Returns the `lighthouse account` command. fn account_cmd() -> Command { let lighthouse_bin = env!("CARGO_BIN_EXE_lighthouse"); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7e647c904d..9b6d23ddcf 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -16,7 +16,10 @@ use std::str::FromStr; use std::string::ToString; use std::time::Duration; use tempfile::TempDir; -use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec}; +use types::{ + Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, + ProgressiveBalancesMode, +}; use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -1125,48 +1128,13 @@ fn default_backfill_rate_limiting_flag() { } #[test] fn default_boot_nodes() { - let mainnet = vec![ - // Lighthouse Team (Sigma Prime) - "enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo", - "enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo", - // EF Team - "enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg", - 
"enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg", - "enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg", - "enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg", - // Teku team (Consensys) - "enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA", - "enr:-KG4QDyytgmE4f7AnvW-ZaUOIi9i79qX4JwjRAiXBZCU65wOfBu-3Nb5I7b_Rmg3KCOcZM_C3y5pg7EBU5XGrcLTduQEhGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaEDKnz_-ps3UUOfHWVYaskI5kWYO_vtYMGYCQRAR3gHDouDdGNwgiMog3VkcIIjKA", - // Prysm team (Prysmatic Labs) - "enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg", - "enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA", - "enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg", - // Nimbus team - 
"enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM", - "enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM" - ]; + let number_of_boot_nodes = 15; CommandLineTest::new() .run_with_zero_port() .with_config(|config| { // Lighthouse Team (Sigma Prime) - assert_eq!(config.network.boot_nodes_enr[0].to_base64(), mainnet[0]); - assert_eq!(config.network.boot_nodes_enr[1].to_base64(), mainnet[1]); - // EF Team - assert_eq!(config.network.boot_nodes_enr[2].to_base64(), mainnet[2]); - assert_eq!(config.network.boot_nodes_enr[3].to_base64(), mainnet[3]); - assert_eq!(config.network.boot_nodes_enr[4].to_base64(), mainnet[4]); - assert_eq!(config.network.boot_nodes_enr[5].to_base64(), mainnet[5]); - // Teku team (Consensys) - assert_eq!(config.network.boot_nodes_enr[6].to_base64(), mainnet[6]); - assert_eq!(config.network.boot_nodes_enr[7].to_base64(), mainnet[7]); - // Prysm team (Prysmatic Labs) - assert_eq!(config.network.boot_nodes_enr[8].to_base64(), mainnet[8]); - assert_eq!(config.network.boot_nodes_enr[9].to_base64(), mainnet[9]); - assert_eq!(config.network.boot_nodes_enr[10].to_base64(), mainnet[10]); - // Nimbus team - assert_eq!(config.network.boot_nodes_enr[11].to_base64(), mainnet[11]); - assert_eq!(config.network.boot_nodes_enr[12].to_base64(), mainnet[12]); + assert_eq!(config.network.boot_nodes_enr.len(), number_of_boot_nodes); }); } #[test] @@ -1451,6 +1419,26 @@ fn empty_self_limiter_flag() { ) }); } + +#[test] +fn empty_inbound_rate_limiter_flag() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + 
config.network.inbound_rate_limiter_config, + Some(lighthouse_network::rpc::config::InboundRateLimiterConfig::default()) + ) + }); +} +#[test] +fn disable_inbound_rate_limiter_flag() { + CommandLineTest::new() + .flag("inbound-rate-limiter", Some("disabled")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.network.inbound_rate_limiter_config, None)); +} + #[test] fn http_allow_origin_flag() { CommandLineTest::new() @@ -1747,10 +1735,12 @@ fn no_reconstruct_historic_states_flag() { } // Tests for Slasher flags. +// Using `--slasher-max-db-size` to work around https://github.com/sigp/lighthouse/issues/2342 #[test] fn slasher_flag() { CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .run_with_zero_port() .with_config_and_dir(|config, dir| { if let Some(slasher_config) = &config.slasher { @@ -1768,6 +1758,7 @@ fn slasher_dir_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .flag("slasher-dir", dir.path().as_os_str().to_str()) .run_with_zero_port() .with_config(|config| { @@ -1782,6 +1773,7 @@ fn slasher_dir_flag() { fn slasher_update_period_flag() { CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .flag("slasher-update-period", Some("100")) .run_with_zero_port() .with_config(|config| { @@ -1796,6 +1788,7 @@ fn slasher_update_period_flag() { fn slasher_slot_offset_flag() { CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .flag("slasher-slot-offset", Some("11.25")) .run_with_zero_port() .with_config(|config| { @@ -1808,6 +1801,7 @@ fn slasher_slot_offset_flag() { fn slasher_slot_offset_nan_flag() { CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .flag("slasher-slot-offset", Some("NaN")) .run_with_zero_port(); } @@ -1815,6 +1809,7 @@ fn slasher_slot_offset_nan_flag() { fn 
slasher_history_length_flag() { CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .flag("slasher-history-length", Some("2048")) .run_with_zero_port() .with_config(|config| { @@ -1829,20 +1824,21 @@ fn slasher_history_length_flag() { fn slasher_max_db_size_flag() { CommandLineTest::new() .flag("slasher", None) - .flag("slasher-max-db-size", Some("10")) + .flag("slasher-max-db-size", Some("2")) .run_with_zero_port() .with_config(|config| { let slasher_config = config .slasher .as_ref() .expect("Unable to parse Slasher config"); - assert_eq!(slasher_config.max_db_size_mbs, 10240); + assert_eq!(slasher_config.max_db_size_mbs, 2048); }); } #[test] fn slasher_attestation_cache_size_flag() { CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .flag("slasher-att-cache-size", Some("10000")) .run_with_zero_port() .with_config(|config| { @@ -1857,6 +1853,7 @@ fn slasher_attestation_cache_size_flag() { fn slasher_chunk_size_flag() { CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .flag("slasher-chunk-size", Some("32")) .run_with_zero_port() .with_config(|config| { @@ -1871,6 +1868,7 @@ fn slasher_chunk_size_flag() { fn slasher_validator_chunk_size_flag() { CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .flag("slasher-validator-chunk-size", Some("512")) .run_with_zero_port() .with_config(|config| { @@ -1882,9 +1880,10 @@ fn slasher_validator_chunk_size_flag() { }); } #[test] -fn slasher_broadcast_flag() { +fn slasher_broadcast_flag_no_args() { CommandLineTest::new() .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) .flag("slasher-broadcast", None) .run_with_zero_port() .with_config(|config| { @@ -1895,29 +1894,62 @@ fn slasher_broadcast_flag() { assert!(slasher_config.broadcast); }); } - #[test] -fn slasher_backend_default() { +fn slasher_broadcast_flag_no_default() { CommandLineTest::new() .flag("slasher", 
None) + .flag("slasher-max-db-size", Some("1")) .run_with_zero_port() .with_config(|config| { - let slasher_config = config.slasher.as_ref().unwrap(); - assert_eq!(slasher_config.backend, slasher::DatabaseBackend::Mdbx); + let slasher_config = config + .slasher + .as_ref() + .expect("Unable to parse Slasher config"); + assert!(slasher_config.broadcast); + }); +} +#[test] +fn slasher_broadcast_flag_true() { + CommandLineTest::new() + .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) + .flag("slasher-broadcast", Some("true")) + .run_with_zero_port() + .with_config(|config| { + let slasher_config = config + .slasher + .as_ref() + .expect("Unable to parse Slasher config"); + assert!(slasher_config.broadcast); + }); +} +#[test] +fn slasher_broadcast_flag_false() { + CommandLineTest::new() + .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) + .flag("slasher-broadcast", Some("false")) + .run_with_zero_port() + .with_config(|config| { + let slasher_config = config + .slasher + .as_ref() + .expect("Unable to parse Slasher config"); + assert!(!slasher_config.broadcast); }); } - #[test] fn slasher_backend_override_to_default() { // Hard to test this flag because all but one backend is disabled by default and the backend // called "disabled" results in a panic. 
CommandLineTest::new() .flag("slasher", None) - .flag("slasher-backend", Some("mdbx")) + .flag("slasher-max-db-size", Some("1")) + .flag("slasher-backend", Some("lmdb")) .run_with_zero_port() .with_config(|config| { let slasher_config = config.slasher.as_ref().unwrap(); - assert_eq!(slasher_config.backend, slasher::DatabaseBackend::Mdbx); + assert_eq!(slasher_config.backend, slasher::DatabaseBackend::Lmdb); }); } @@ -2199,3 +2231,49 @@ fn disable_optimistic_finalized_sync() { assert!(!config.chain.optimistic_finalized_sync); }); } + +#[test] +fn invalid_gossip_verified_blocks_path_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.network.invalid_block_storage, None)); +} + +#[test] +fn invalid_gossip_verified_blocks_path() { + let path = "/home/karlm/naughty-blocks"; + CommandLineTest::new() + .flag("invalid-gossip-verified-blocks-path", Some(path)) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.network.invalid_block_storage, + Some(PathBuf::from(path)) + ) + }); +} + +#[test] +fn progressive_balances_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.progressive_balances_mode, + ProgressiveBalancesMode::Checked + ) + }); +} + +#[test] +fn progressive_balances_fast() { + CommandLineTest::new() + .flag("progressive-balances", Some("fast")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.progressive_balances_mode, + ProgressiveBalancesMode::Fast + ) + }); +} diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 4dd5ad95dd..659dea468d 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -39,7 +39,7 @@ impl CommandLineTest { } fn run_with_ip(&mut self) -> CompletedTest { - self.cmd.arg(IP_ADDRESS); + self.cmd.arg("--enr-address").arg(IP_ADDRESS); self.run() } } @@ -67,7 +67,13 @@ fn port_flag() { .flag("port", Some(port.to_string().as_str())) 
.run_with_ip() .with_config(|config| { - assert_eq!(config.listen_socket.port(), port); + assert_eq!( + config + .ipv4_listen_socket + .expect("Bootnode should be listening on IPv4") + .port(), + port + ); }) } @@ -78,7 +84,13 @@ fn listen_address_flag() { .flag("listen-address", Some("127.0.0.2")) .run_with_ip() .with_config(|config| { - assert_eq!(config.listen_socket.ip(), addr); + assert_eq!( + config + .ipv4_listen_socket + .expect("Bootnode should be listening on IPv4") + .ip(), + &addr + ); }); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 6cef8e597f..9bcfe2a1d5 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -103,10 +103,8 @@ fn beacon_nodes_flag() { #[test] fn allow_unsynced_flag() { - CommandLineTest::new() - .flag("allow-unsynced", None) - .run() - .with_config(|config| assert!(config.allow_unsynced_beacon_node)); + // No-op, but doesn't crash. + CommandLineTest::new().flag("allow-unsynced", None).run(); } #[test] @@ -527,3 +525,24 @@ fn latency_measurement_service() { assert!(!config.enable_latency_measurement_service); }); } + +#[test] +fn validator_registration_batch_size() { + CommandLineTest::new().run().with_config(|config| { + assert_eq!(config.validator_registration_batch_size, 500); + }); + CommandLineTest::new() + .flag("validator-registration-batch-size", Some("100")) + .run() + .with_config(|config| { + assert_eq!(config.validator_registration_batch_size, 100); + }); +} + +#[test] +#[should_panic] +fn validator_registration_batch_size_zero_value() { + CommandLineTest::new() + .flag("validator-registration-batch-size", Some("0")) + .run(); +} diff --git a/scripts/ci/publish.sh b/scripts/ci/publish.sh deleted file mode 100755 index f2cea95b7d..0000000000 --- a/scripts/ci/publish.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env bash - -# Based on: https://github.com/tokio-rs/tokio/blob/master/bin/publish - -set -e -USAGE="Publish a new release of 
a lighthouse crate -USAGE: - $(basename "$0") [OPTIONS] [CRATE_PATH] [CRATE] [TAG_NAME] -OPTIONS: - -v, --verbose Use verbose Cargo output - -d, --dry-run Perform a dry run (do not publish the release) - -h, --help Show this help text and exit - --allow-dirty Allow dirty working directories to be packaged" - -DRY_RUN="" -DIRTY="" -VERBOSE="" - -verify() { - echo "Verifying if $CRATE v$VERSION can be released" - - # `cargo pkgid` has different formats based on whether the `[lib]` name and `[package]` name - # are the same, necessitating the following logic. - # - # Try to match on `#` - ACTUAL=$(cargo pkgid | sed -n 's/.*#\([0-9]\)/\1/p' ) - if [ -z "$ACTUAL" ]; then - # Match on the final `:` - ACTUAL=$(cargo pkgid | sed -n 's/.*:\(.*\)/\1/p') - fi - - if [ "$ACTUAL" != "$VERSION" ]; then - echo "expected to release version $VERSION, but Cargo.toml contained $ACTUAL" - exit 1 - fi -} - -release() { - echo "Releasing $CRATE v$VERSION" - cargo package $VERBOSE $DIRTY - cargo publish $VERBOSE $DRY_RUN $DIRTY -} - -while [[ $# -gt 0 ]] -do - -case "$1" in - -h|--help) - echo "$USAGE" - exit 0 - ;; - -v|--verbose) - VERBOSE="--verbose" - set +x - shift - ;; - --allow-dirty) - DIRTY="--allow-dirty" - shift - ;; - -d|--dry-run) - DRY_RUN="--dry-run" - shift - ;; - -*) - echo "unknown flag \"$1\"" - echo "$USAGE" - exit 1 - ;; - *) # crate, crate path, or version - if [ -z "$CRATE_PATH" ]; then - CRATE_PATH="$1" - elif [ -z "$CRATE" ]; then - CRATE="$1" - elif [ -z "$TAG_NAME" ]; then - TAG_NAME="$1" - VERSION=$(sed -e 's#.*-v\([0-9]\)#\1#' <<< "$TAG_NAME") - else - echo "unknown positional argument \"$1\"" - echo "$USAGE" - exit 1 - fi - shift - ;; -esac -done -# set -- "${POSITIONAL[@]}" - -if [ -z "$VERSION" ]; then - echo "no version specified!" - HELP=1 -fi - -if [ -z "$CRATE" ]; then - echo "no crate specified!" 
- HELP=1 -fi - -if [ -n "$HELP" ]; then - echo "$USAGE" - exit 1 -fi - -if [ -d "$CRATE_PATH" ]; then - (cd "$CRATE_PATH" && verify && release ) -else - echo "no such dir \"$CRATE_PATH\"" - exit 1 -fi diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index c4050ac934..f261ea67fd 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -1,11 +1,16 @@ # Simple Local Testnet -These scripts allow for running a small local testnet with multiple beacon nodes and validator clients. +These scripts allow for running a small local testnet with multiple beacon nodes and validator clients and a geth execution client. This setup can be useful for testing and development. ## Requirements -The scripts require `lcli` and `lighthouse` to be installed on `PATH`. From the +The scripts require `lcli`, `lighthouse`, `geth`, `bootnode` to be installed on `PATH`. + + +MacOS users need to install GNU `sed` and GNU `grep`, and add them both to `PATH` as well. + +From the root of this repository, run: ```bash @@ -17,17 +22,23 @@ make install-lcli Modify `vars.env` as desired. -Start a local eth1 ganache server plus boot node along with `BN_COUNT` -number of beacon nodes and `VC_COUNT` validator clients. +The testnet starts with a post-merge genesis state. +Start a consensus layer and execution layer boot node along with `BN_COUNT` +number of beacon nodes each connected to a geth execution client and `VC_COUNT` validator clients. + +The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. It also takes a mandatory `GENESIS_FILE` for initialising geth's state. +A sample `genesis.json` is provided in this directory. + +The `ETH1_BLOCK_HASH` environment variable is set to the block_hash of the genesis execution layer block which depends on the contents of `genesis.json`. 
Users of these scripts need to ensure that the `ETH1_BLOCK_HASH` variable is updated if genesis file is modified. -The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. The options may be in any order or absent in which case they take the default value specified. - VC_COUNT: the number of validator clients to create, default: `BN_COUNT` - DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info` + ```bash -./start_local_testnet.sh +./start_local_testnet.sh genesis.json ``` ## Stopping the testnet @@ -41,31 +52,38 @@ This is not necessary before `start_local_testnet.sh` as it invokes `stop_local_ These scripts are used by ./start_local_testnet.sh and may be used to manually -Start a local eth1 ganache server -```bash -./ganache_test_node.sh -``` - -Assuming you are happy with the configuration in `vars.env`, deploy the deposit contract, make deposits, -create the testnet directory, genesis state and validator keys with: +Assuming you are happy with the configuration in `vars.env`, +create the testnet directory, genesis state with embedded validators and validator keys with: ```bash ./setup.sh ``` -Generate bootnode enr and start a discv5 bootnode so that multiple beacon nodes can find each other +Note: The generated genesis validators are embedded into the genesis state as genesis validators and hence do not require manual deposits to activate. + +Generate bootnode enr and start an EL and CL bootnode so that multiple nodes can find each other ```bash ./bootnode.sh +./el_bootnode.sh +``` + +Start a geth node: +```bash +./geth.sh +``` +e.g. +```bash +./geth.sh $HOME/.lighthouse/local-testnet/geth_1 5000 6000 7000 genesis.json ``` Start a beacon node: ```bash -./beacon_node.sh +./beacon_node.sh ``` e.g. 
```bash -./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 +./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 http://localhost:6000 ~/.lighthouse/local-testnet/geth_1/geth/jwtsecret ``` In a new terminal, start the validator client which will attach to the first diff --git a/scripts/local_testnet/anvil_test_node.sh b/scripts/local_testnet/anvil_test_node.sh new file mode 100755 index 0000000000..41be917560 --- /dev/null +++ b/scripts/local_testnet/anvil_test_node.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -Eeuo pipefail + +source ./vars.env + +exec anvil \ + --balance 1000000000 \ + --gas-limit 1000000000 \ + --accounts 10 \ + --mnemonic "$ETH1_NETWORK_MNEMONIC" \ + --block-time $SECONDS_PER_ETH1_BLOCK \ + --port 8545 \ + --chain-id "$CHAIN_ID" diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh index ac61b54dfb..1a04d12d4a 100755 --- a/scripts/local_testnet/beacon_node.sh +++ b/scripts/local_testnet/beacon_node.sh @@ -30,6 +30,8 @@ while getopts "d:sh" flag; do echo " DATADIR Value for --datadir parameter" echo " NETWORK-PORT Value for --enr-udp-port, --enr-tcp-port and --port" echo " HTTP-PORT Value for --http-port" + echo " EXECUTION-ENDPOINT Value for --execution-endpoint" + echo " EXECUTION-JWT Value for --execution-jwt" exit ;; esac @@ -39,14 +41,19 @@ done data_dir=${@:$OPTIND+0:1} network_port=${@:$OPTIND+1:1} http_port=${@:$OPTIND+2:1} +execution_endpoint=${@:$OPTIND+3:1} +execution_jwt=${@:$OPTIND+4:1} -exec lighthouse \ +lighthouse_binary=lighthouse + +exec $lighthouse_binary \ --debug-level $DEBUG_LEVEL \ bn \ $SUBSCRIBE_ALL_SUBNETS \ --datadir $data_dir \ --testnet-dir $TESTNET_DIR \ --enable-private-discovery \ + --disable-peer-scoring \ --staking \ --enr-address 127.0.0.1 \ --enr-udp-port $network_port \ @@ -54,4 +61,6 @@ exec lighthouse \ --port $network_port \ --http-port $http_port \ --disable-packet-filter \ - --target-peers $((BN_COUNT - 1)) + --target-peers $((BN_COUNT - 
1)) \ + --execution-endpoint $execution_endpoint \ + --execution-jwt $execution_jwt diff --git a/scripts/local_testnet/el_bootnode.sh b/scripts/local_testnet/el_bootnode.sh new file mode 100755 index 0000000000..d73a463f6d --- /dev/null +++ b/scripts/local_testnet/el_bootnode.sh @@ -0,0 +1,3 @@ +priv_key="02fd74636e96a8ffac8e7b01b0de8dea94d6bcf4989513b38cf59eb32163ff91" +source ./vars.env +$EL_BOOTNODE_BINARY --nodekeyhex $priv_key \ No newline at end of file diff --git a/scripts/local_testnet/ganache_test_node.sh b/scripts/local_testnet/ganache_test_node.sh deleted file mode 100755 index a489c33224..0000000000 --- a/scripts/local_testnet/ganache_test_node.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -Eeuo pipefail - -source ./vars.env - -exec ganache \ - --defaultBalanceEther 1000000000 \ - --gasLimit 1000000000 \ - --accounts 10 \ - --mnemonic "$ETH1_NETWORK_MNEMONIC" \ - --port 8545 \ - --blockTime $SECONDS_PER_ETH1_BLOCK \ - --chain.chainId "$CHAIN_ID" diff --git a/scripts/local_testnet/genesis.json b/scripts/local_testnet/genesis.json new file mode 100644 index 0000000000..3ac553e55b --- /dev/null +++ b/scripts/local_testnet/genesis.json @@ -0,0 +1,861 @@ +{ + "config": { + "chainId": 4242, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "mergeNetsplitBlock": 0, + "shanghaiTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true + }, + "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x6d6172697573766477000000" + }, + "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { + "balance": "10000000000000000000000000" + }, + "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { + "balance": "10000000000000000000000000" + }, + "0x0000000000000000000000000000000000000000": { + "balance": "1" + }, + 
"0x0000000000000000000000000000000000000001": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000002": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000003": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000004": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000005": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000006": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000007": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000008": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000009": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000010": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000011": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000012": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000013": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000014": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000015": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000016": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000017": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000018": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000019": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001c": { + 
"balance": "1" + }, + "0x000000000000000000000000000000000000001d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000020": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000021": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000022": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000023": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000024": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000025": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000026": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000027": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000028": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000029": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000030": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000031": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000032": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000033": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000034": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000035": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000036": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000037": { + "balance": "1" + }, + 
"0x0000000000000000000000000000000000000038": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000039": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000040": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000041": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000042": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000043": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000044": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000045": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000046": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000047": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000048": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000049": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000050": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000051": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000052": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000053": { + 
"balance": "1" + }, + "0x0000000000000000000000000000000000000054": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000055": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000056": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000057": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000058": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000059": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000060": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000061": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000062": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000063": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000064": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000065": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000066": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000067": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000068": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000069": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006e": { + "balance": "1" + }, + 
"0x000000000000000000000000000000000000006f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000070": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000071": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000072": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000073": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000074": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000075": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000076": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000077": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000078": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000079": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000080": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000081": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000082": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000083": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000084": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000085": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000086": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000087": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000088": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000089": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008a": { + 
"balance": "1" + }, + "0x000000000000000000000000000000000000008b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000090": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000091": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000092": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000093": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000094": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000095": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000096": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000097": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000098": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000099": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009f": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a5": { + "balance": "1" + }, + 
"0x00000000000000000000000000000000000000a6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000aa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ab": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ac": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ad": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ae": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000af": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ba": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000be": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c1": { + 
"balance": "1" + }, + "0x00000000000000000000000000000000000000c2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ca": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ce": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000da": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000db": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000dc": { + "balance": "1" + }, + 
"0x00000000000000000000000000000000000000dd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000de": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000df": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ea": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000eb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ec": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ed": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ee": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ef": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f8": { + 
"balance": "1" + }, + "0x00000000000000000000000000000000000000f9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fe": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ff": { + "balance": "1" + }, + "0x4242424242424242424242424242424242424242": { + "balance": "0", + "code": "0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507
fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017ffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b505160408051602081810194909452808201929092528051808303820181526060909
2019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0180199092169116179052604051919
0930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b826005815181106116435
7fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", + "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", + "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", + "0x0000000000000000000000000000000000000000000000000000000000000026": 
"0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", + "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", + "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", + "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", + "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", + "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", + "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", + "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", + "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", + "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", + "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", + "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", + "0x0000000000000000000000000000000000000000000000000000000000000034": 
"0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", + "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", + "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", + "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" + } + }, + "0x9a4aa7d9C2F6386e5F24d790eB2FFB9fd543A170": { + "balance": "1000000000000000000000000000" + }, + "0x5E3141B900ac5f5608b0d057D10d45a0e4927cD9": { + "balance": "1000000000000000000000000000" + }, + 
"0x7cF5Dbc49F0904065664b5B6C0d69CaB55F33988": { + "balance": "1000000000000000000000000000" + }, + "0x8D12b071A6F3823A535D38C4a583a2FA1859e822": { + "balance": "1000000000000000000000000000" + }, + "0x3B575D3cda6b30736A38B031E0d245E646A21135": { + "balance": "1000000000000000000000000000" + }, + "0x53bDe6CF93461674F590E532006b4022dA57A724": { + "balance": "1000000000000000000000000000" + } + }, + "coinbase": "0x0000000000000000000000000000000000000000", + "difficulty": "0x01", + "extraData": "", + "gasLimit": "0x400000", + "nonce": "0x1234", + "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "1662465600" +} \ No newline at end of file diff --git a/scripts/local_testnet/geth.sh b/scripts/local_testnet/geth.sh new file mode 100755 index 0000000000..d3923cdd89 --- /dev/null +++ b/scripts/local_testnet/geth.sh @@ -0,0 +1,54 @@ +set -Eeuo pipefail + +source ./vars.env + +# Get options +while getopts "d:sh" flag; do + case "${flag}" in + d) DEBUG_LEVEL=${OPTARG};; + s) SUBSCRIBE_ALL_SUBNETS="--subscribe-all-subnets";; + h) + echo "Start a geth node" + echo + echo "usage: $0 " + echo + echo "Options:" + echo " -h: this help" + echo + echo "Positional arguments:" + echo " DATADIR Value for --datadir parameter" + echo " NETWORK-PORT Value for --port" + echo " HTTP-PORT Value for --http.port" + echo " AUTH-PORT Value for --authrpc.port" + echo " GENESIS_FILE Value for geth init" + exit + ;; + esac +done + +# Get positional arguments +data_dir=${@:$OPTIND+0:1} +network_port=${@:$OPTIND+1:1} +http_port=${@:$OPTIND+2:1} +auth_port=${@:$OPTIND+3:1} +genesis_file=${@:$OPTIND+4:1} + + +# Init +$GETH_BINARY init \ + --datadir $data_dir \ + $genesis_file + +echo "Completed init" + +exec $GETH_BINARY \ + --datadir $data_dir \ + --ipcdisable \ + --http \ + --http.api="engine,eth,web3,net,debug" \ + --networkid=$CHAIN_ID \ + --syncmode=full \ + 
--bootnodes $EL_BOOTNODE_ENODE \ + --port $network_port \ + --http.port $http_port \ + --authrpc.port $auth_port \ No newline at end of file diff --git a/scripts/local_testnet/kill_processes.sh b/scripts/local_testnet/kill_processes.sh index d63725ac14..83a0027337 100755 --- a/scripts/local_testnet/kill_processes.sh +++ b/scripts/local_testnet/kill_processes.sh @@ -12,7 +12,7 @@ if [ -f "$1" ]; then [[ -n "$pid" ]] || continue echo killing $pid - kill $pid + kill $pid || true done < $1 fi diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index 82336984af..283aa0c026 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -1,7 +1,6 @@ #!/usr/bin/env bash # -# Deploys the deposit contract and makes deposits for $VALIDATOR_COUNT insecure deterministic validators. # Produces a testnet specification and a genesis state where the genesis time # is now + $GENESIS_DELAY. # @@ -13,11 +12,6 @@ set -o nounset -o errexit -o pipefail source ./vars.env -lcli \ - deploy-deposit-contract \ - --eth1-http http://localhost:8545 \ - --confirmations 1 \ - --validator-count $VALIDATOR_COUNT NOW=`date +%s` GENESIS_TIME=`expr $NOW + $GENESIS_DELAY` @@ -32,14 +26,20 @@ lcli \ --genesis-delay $GENESIS_DELAY \ --genesis-fork-version $GENESIS_FORK_VERSION \ --altair-fork-epoch $ALTAIR_FORK_EPOCH \ + --bellatrix-fork-epoch $BELLATRIX_FORK_EPOCH \ + --capella-fork-epoch $CAPELLA_FORK_EPOCH \ + --ttd $TTD \ + --eth1-block-hash $ETH1_BLOCK_HASH \ --eth1-id $CHAIN_ID \ --eth1-follow-distance 1 \ --seconds-per-slot $SECONDS_PER_SLOT \ --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ --proposer-score-boost "$PROPOSER_SCORE_BOOST" \ + --validator-count $GENESIS_VALIDATOR_COUNT \ + --interop-genesis-state \ --force -echo Specification generated at $TESTNET_DIR. +echo Specification and genesis.ssz generated at $TESTNET_DIR. echo "Generating $VALIDATOR_COUNT validators concurrently... 
(this may take a while)" lcli \ @@ -49,13 +49,3 @@ lcli \ --node-count $BN_COUNT echo Validators generated with keystore passwords at $DATADIR. -echo "Building genesis state... (this might take a while)" - -lcli \ - interop-genesis \ - --spec $SPEC_PRESET \ - --genesis-time $GENESIS_TIME \ - --testnet-dir $TESTNET_DIR \ - $GENESIS_VALIDATOR_COUNT - -echo Created genesis state in $TESTNET_DIR diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index e3aba5c3ad..64111d5627 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -40,6 +40,8 @@ if (( $VC_COUNT > $BN_COUNT )); then exit fi +genesis_file=${@:$OPTIND+0:1} + # Init some constants PID_FILE=$TESTNET_DIR/PIDS.pid LOG_DIR=$TESTNET_DIR @@ -55,6 +57,9 @@ mkdir -p $LOG_DIR for (( bn=1; bn<=$BN_COUNT; bn++ )); do touch $LOG_DIR/beacon_node_$bn.log done +for (( el=1; el<=$BN_COUNT; el++ )); do + touch $LOG_DIR/geth_$el.log +done for (( vc=1; vc<=$VC_COUNT; vc++ )); do touch $LOG_DIR/validator_node_$vc.log done @@ -92,29 +97,49 @@ execute_command_add_PID() { echo "$!" >> $PID_FILE } -# Start ganache, setup things up and start the bootnode. 
-# The delays are necessary, hopefully there is a better way :( - -# Delay to let ganache to get started -execute_command_add_PID ganache_test_node.log ./ganache_test_node.sh -sleeping 10 # Setup data echo "executing: ./setup.sh >> $LOG_DIR/setup.log" ./setup.sh >> $LOG_DIR/setup.log 2>&1 +# Update future hardforks time in the EL genesis file based on the CL genesis time +GENESIS_TIME=$(lcli pretty-ssz state_merge $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') +echo $GENESIS_TIME +CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) +echo $CAPELLA_TIME +sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $genesis_file +cat $genesis_file + # Delay to let boot_enr.yaml to be created execute_command_add_PID bootnode.log ./bootnode.sh sleeping 1 +execute_command_add_PID el_bootnode.log ./el_bootnode.sh +sleeping 1 + # Start beacon nodes BN_udp_tcp_base=9000 BN_http_port_base=8000 +EL_base_network=7000 +EL_base_http=6000 +EL_base_auth_http=5000 + (( $VC_COUNT < $BN_COUNT )) && SAS=-s || SAS= +for (( el=1; el<=$BN_COUNT; el++ )); do + execute_command_add_PID geth_$el.log ./geth.sh $DATADIR/geth_datadir$el $((EL_base_network + $el)) $((EL_base_http + $el)) $((EL_base_auth_http + $el)) $genesis_file +done + +sleeping 20 + +# Reset the `genesis.json` config file fork times. 
+sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' $genesis_file + for (( bn=1; bn<=$BN_COUNT; bn++ )); do - execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_http_port_base + $bn)) + secret=$DATADIR/geth_datadir$bn/geth/jwtsecret + echo $secret + execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_http_port_base + $bn)) http://localhost:$((EL_base_auth_http + $bn)) $secret done # Start requested number of validator clients diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index 975a2a6753..d88a1833cb 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -30,4 +30,5 @@ exec lighthouse \ --testnet-dir $TESTNET_DIR \ --init-slashing-protection \ --beacon-nodes ${@:$OPTIND+1:1} \ + --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 \ $VC_ARGS diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 1ade173286..6e05f0c411 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -1,17 +1,26 @@ +# Path to the geth binary +GETH_BINARY=geth +EL_BOOTNODE_BINARY=bootnode + # Base directories for the validator keys and secrets DATADIR=~/.lighthouse/local-testnet # Directory for the eth2 config TESTNET_DIR=$DATADIR/testnet -# Mnemonic for the ganache test network -ETH1_NETWORK_MNEMONIC="vast thought differ pull jewel broom cook wrist tribe word before omit" +# Mnemonic for generating validator keys +MNEMONIC_PHRASE="vast thought differ pull jewel broom cook wrist tribe word before omit" -# Hardcoded deposit contract based on ETH1_NETWORK_MNEMONIC -DEPOSIT_CONTRACT_ADDRESS=8c594691c0e592ffa21f153a16ae41db5befcaaa 
+EL_BOOTNODE_ENODE="enode://51ea9bb34d31efc3491a842ed13b8cab70e753af108526b57916d716978b380ed713f4336a80cdb85ec2a115d5a8c0ae9f3247bed3c84d3cb025c6bab311062c@127.0.0.1:0?discport=30301" + +# Hardcoded deposit contract +DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 GENESIS_FORK_VERSION=0x42424242 +# Block hash generated from genesis.json in directory +ETH1_BLOCK_HASH=4b0e17cf5c04616d64526d292b80a1f2720cf2195d990006e4ea6950c5bbcb9f + VALIDATOR_COUNT=80 GENESIS_VALIDATOR_COUNT=80 @@ -33,7 +42,11 @@ BOOTNODE_PORT=4242 CHAIN_ID=4242 # Hard fork configuration -ALTAIR_FORK_EPOCH=18446744073709551615 +ALTAIR_FORK_EPOCH=0 +BELLATRIX_FORK_EPOCH=0 +CAPELLA_FORK_EPOCH=1 + +TTD=0 # Spec version (mainnet or minimal) SPEC_PRESET=mainnet diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 95dfff5696..e9d3e39ce5 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Requires `lighthouse`, ``lcli`, `ganache`, `curl`, `jq` +# Requires `lighthouse`, `lcli`, `geth`, `bootnode`, `curl`, `jq` BEHAVIOR=$1 @@ -15,21 +15,15 @@ exit_if_fails() { $@ EXIT_CODE=$? if [[ $EXIT_CODE -eq 1 ]]; then - exit 111 + exit 1 fi } +genesis_file=$2 source ./vars.env exit_if_fails ../local_testnet/clean.sh -echo "Starting ganache" - -exit_if_fails ../local_testnet/ganache_test_node.sh &> /dev/null & -GANACHE_PID=$! - -# Wait for ganache to start -sleep 5 echo "Setting up local testnet" @@ -41,28 +35,31 @@ exit_if_fails cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/loc echo "Starting bootnode" exit_if_fails ../local_testnet/bootnode.sh &> /dev/null & -BOOT_PID=$! 
+ +exit_if_fails ../local_testnet/el_bootnode.sh &> /dev/null & # wait for the bootnode to start sleep 10 +echo "Starting local execution nodes" + +exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir1 7000 6000 5000 $genesis_file &> geth.log & +exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir2 7100 6100 5100 $genesis_file &> /dev/null & +exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir3 7200 6200 5200 $genesis_file &> /dev/null & + +sleep 20 + echo "Starting local beacon nodes" -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null & -BEACON_PID=$! -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null & -BEACON_PID2=$! -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null & -BEACON_PID3=$! +exit_if_fails ../local_testnet/beacon_node.sh -d debug $HOME/.lighthouse/local-testnet/node_1 9000 8000 http://localhost:5000 $HOME/.lighthouse/local-testnet/geth_datadir1/geth/jwtsecret &> beacon1.log & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 http://localhost:5100 $HOME/.lighthouse/local-testnet/geth_datadir2/geth/jwtsecret &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 http://localhost:5200 $HOME/.lighthouse/local-testnet/geth_datadir3/geth/jwtsecret &> /dev/null & echo "Starting local validator clients" exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null & -VALIDATOR_1_PID=$! exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null & -VALIDATOR_2_PID=$! 
exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null & -VALIDATOR_3_PID=$! echo "Waiting an epoch before starting the next validator client" sleep $(( $SECONDS_PER_SLOT * 32 )) @@ -71,7 +68,7 @@ if [[ "$BEHAVIOR" == "failure" ]]; then echo "Starting the doppelganger validator client" - # Use same keys as keys from VC1, but connect to BN2 + # Use same keys as keys from VC1 and connect to BN2 # This process should not last longer than 2 epochs timeout $(( $SECONDS_PER_SLOT * 32 * 2 )) ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1_doppelganger http://localhost:8100 DOPPELGANGER_EXIT=$? @@ -79,7 +76,9 @@ if [[ "$BEHAVIOR" == "failure" ]]; then echo "Shutting down" # Cleanup - kill $BOOT_PID $BEACON_PID $BEACON_PID2 $BEACON_PID3 $GANACHE_PID $VALIDATOR_1_PID $VALIDATOR_2_PID $VALIDATOR_3_PID + killall geth + killall lighthouse + killall bootnode echo "Done" @@ -98,7 +97,6 @@ if [[ "$BEHAVIOR" == "success" ]]; then echo "Starting the last validator client" ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_4 http://localhost:8100 & - VALIDATOR_4_PID=$! DOPPELGANGER_FAILURE=0 # Sleep three epochs, then make sure all validators were active in epoch 2. 
Use @@ -144,7 +142,10 @@ if [[ "$BEHAVIOR" == "success" ]]; then # Cleanup cd $PREVIOUS_DIR - kill $BOOT_PID $BEACON_PID $BEACON_PID2 $BEACON_PID3 $GANACHE_PID $VALIDATOR_1_PID $VALIDATOR_2_PID $VALIDATOR_3_PID $VALIDATOR_4_PID + + killall geth + killall lighthouse + killall bootnode echo "Done" @@ -153,4 +154,4 @@ if [[ "$BEHAVIOR" == "success" ]]; then fi fi -exit 0 +exit 0 \ No newline at end of file diff --git a/scripts/tests/genesis.json b/scripts/tests/genesis.json new file mode 100644 index 0000000000..ec3cd1e813 --- /dev/null +++ b/scripts/tests/genesis.json @@ -0,0 +1,851 @@ +{ + "config": { + "chainId": 4242, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "mergeForkBlock": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true + }, + "alloc": { + "0x0000000000000000000000000000000000000000": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000001": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000002": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000003": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000004": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000005": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000006": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000007": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000008": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000009": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000d": { + "balance": "1" + }, + 
"0x000000000000000000000000000000000000000e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000010": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000011": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000012": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000013": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000014": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000015": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000016": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000017": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000018": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000019": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000020": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000021": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000022": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000023": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000024": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000025": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000026": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000027": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000028": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000029": { + 
"balance": "1" + }, + "0x000000000000000000000000000000000000002a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000030": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000031": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000032": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000033": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000034": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000035": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000036": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000037": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000038": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000039": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000040": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000041": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000042": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000043": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000044": { + "balance": "1" + }, + 
"0x0000000000000000000000000000000000000045": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000046": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000047": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000048": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000049": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000050": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000051": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000052": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000053": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000054": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000055": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000056": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000057": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000058": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000059": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000060": { + 
"balance": "1" + }, + "0x0000000000000000000000000000000000000061": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000062": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000063": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000064": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000065": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000066": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000067": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000068": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000069": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000070": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000071": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000072": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000073": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000074": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000075": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000076": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000077": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000078": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000079": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007b": { + "balance": "1" + }, + 
"0x000000000000000000000000000000000000007c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000080": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000081": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000082": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000083": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000084": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000085": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000086": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000087": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000088": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000089": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000090": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000091": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000092": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000093": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000094": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000095": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000096": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000097": { + 
"balance": "1" + }, + "0x0000000000000000000000000000000000000098": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000099": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009f": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000aa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ab": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ac": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ad": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ae": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000af": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b2": { + "balance": "1" + }, + 
"0x00000000000000000000000000000000000000b3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ba": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000be": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ca": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ce": { + 
"balance": "1" + }, + "0x00000000000000000000000000000000000000cf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000da": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000db": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000dc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000dd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000de": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000df": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e9": { + "balance": "1" + }, + 
"0x00000000000000000000000000000000000000ea": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000eb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ec": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ed": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ee": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ef": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fe": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ff": { + "balance": "1" + }, + "0x4242424242424242424242424242424242424242": { + "balance": "0", + "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600
4018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527ffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527ffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac9550505050505
0565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744
461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", + "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", + "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", + "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", + "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", + "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", + "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", + "0x000000000000000000000000000000000000000000000000000000000000002a": 
"0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", + "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", + "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", + "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", + "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", + "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", + "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", + "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", + "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", + "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", + "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", + "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0x0000000000000000000000000000000000000000000000000000000000000038": 
"0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" + } + }, + "0x9a4aa7d9C2F6386e5F24d790eB2FFB9fd543A170": { + "balance": "1000000000000000000000000000" + }, + "0x5E3141B900ac5f5608b0d057D10d45a0e4927cD9": { + "balance": "1000000000000000000000000000" + }, + "0x7cF5Dbc49F0904065664b5B6C0d69CaB55F33988": { + "balance": "1000000000000000000000000000" + }, + "0x8D12b071A6F3823A535D38C4a583a2FA1859e822": { + "balance": "1000000000000000000000000000" + }, + "0x3B575D3cda6b30736A38B031E0d245E646A21135": { + "balance": "1000000000000000000000000000" + }, + "0x53bDe6CF93461674F590E532006b4022dA57A724": { + "balance": "1000000000000000000000000000" + } + }, + "coinbase": "0x0000000000000000000000000000000000000000", + "difficulty": "0x01", + "extraData": "", + "gasLimit": "0x400000", + "nonce": "0x1234", + "mixhash": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "1662465600" +} \ No newline at end of file diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index 778a0afca5..a7e696ec0a 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -1,17 +1,23 @@ +# Path to the geth binary +GETH_BINARY=geth +EL_BOOTNODE_BINARY=bootnode + # Base directories for the validator keys and secrets DATADIR=~/.lighthouse/local-testnet # Directory for the eth2 config TESTNET_DIR=$DATADIR/testnet -# Mnemonic for the ganache test network -ETH1_NETWORK_MNEMONIC="vast thought differ pull jewel broom cook wrist tribe word before omit" +EL_BOOTNODE_ENODE="enode://51ea9bb34d31efc3491a842ed13b8cab70e753af108526b57916d716978b380ed713f4336a80cdb85ec2a115d5a8c0ae9f3247bed3c84d3cb025c6bab311062c@127.0.0.1:0?discport=30301" -# Hardcoded deposit contract based on ETH1_NETWORK_MNEMONIC -DEPOSIT_CONTRACT_ADDRESS=8c594691c0e592ffa21f153a16ae41db5befcaaa +# Hardcoded deposit contract +DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 GENESIS_FORK_VERSION=0x42424242 +# Block hash generated from genesis.json in directory +ETH1_BLOCK_HASH=16ef16304456fdacdeb272bd70207021031db355ed6c5e44ebd34c1ab757e221 + VALIDATOR_COUNT=80 GENESIS_VALIDATOR_COUNT=80 @@ -33,7 +39,12 @@ BOOTNODE_PORT=4242 CHAIN_ID=4242 # Hard fork configuration -ALTAIR_FORK_EPOCH=18446744073709551615 +ALTAIR_FORK_EPOCH=0 +BELLATRIX_FORK_EPOCH=0 +CAPELLA_FORK_EPOCH=18446744073709551615 +DENEB_FORK_EPOCH=18446744073709551615 + +TTD=0 # Spec version (mainnet or minimal) SPEC_PRESET=mainnet @@ -45,7 +56,7 @@ SECONDS_PER_SLOT=3 SECONDS_PER_ETH1_BLOCK=1 # Proposer score boost percentage -PROPOSER_SCORE_BOOST=40 +PROPOSER_SCORE_BOOST=70 # Enable doppelganger detection -VC_ARGS=" --enable-doppelganger-protection " +VC_ARGS=" --enable-doppelganger-protection " \ No newline at end of file diff --git 
a/slasher/Cargo.toml b/slasher/Cargo.toml index 7f2ac456b5..bfa7b5f64c 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Michael Sproul "] edition = "2021" [features] -default = ["mdbx"] +default = ["lmdb"] mdbx = ["dep:mdbx"] lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] diff --git a/slasher/service/Cargo.toml b/slasher/service/Cargo.toml index 0a787defa2..63cf1e4649 100644 --- a/slasher/service/Cargo.toml +++ b/slasher/service/Cargo.toml @@ -9,7 +9,7 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" } directory = { path = "../../common/directory" } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } network = { path = "../../beacon_node/network" } -slasher = { path = "..", default-features = false } +slasher = { path = ".." } slog = "2.5.2" slot_clock = { path = "../../common/slot_clock" } state_processing = { path = "../../consensus/state_processing" } diff --git a/slasher/src/config.rs b/slasher/src/config.rs index e2a58a406a..361621d176 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -13,15 +13,16 @@ pub const DEFAULT_MAX_DB_SIZE: usize = 256 * 1024; // 256 GiB pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: usize = 100_000; pub const DEFAULT_BROADCAST: bool = false; -#[cfg(feature = "mdbx")] +#[cfg(all(feature = "mdbx", not(feature = "lmdb")))] pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Mdbx; -#[cfg(all(feature = "lmdb", not(feature = "mdbx")))] +#[cfg(feature = "lmdb")] pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Lmdb; #[cfg(not(any(feature = "mdbx", feature = "lmdb")))] pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Disabled; pub const MAX_HISTORY_LENGTH: usize = 1 << 16; pub const MEGABYTE: usize = 1 << 20; +pub const MDBX_DATA_FILENAME: &str = "mdbx.dat"; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -64,6 +65,13 @@ pub enum DatabaseBackend { Disabled, } +#[derive(Debug, PartialEq)] +pub enum 
DatabaseBackendOverride { + Success(DatabaseBackend), + Failure(PathBuf), + Noop, +} + impl Config { pub fn new(database_path: PathBuf) -> Self { Self { @@ -161,4 +169,28 @@ impl Config { .filter(move |v| self.validator_chunk_index(**v) == validator_chunk_index) .copied() } + + pub fn override_backend(&mut self) -> DatabaseBackendOverride { + let mdbx_path = self.database_path.join(MDBX_DATA_FILENAME); + + #[cfg(feature = "mdbx")] + let already_mdbx = self.backend == DatabaseBackend::Mdbx; + #[cfg(not(feature = "mdbx"))] + let already_mdbx = false; + + if !already_mdbx && mdbx_path.exists() { + #[cfg(feature = "mdbx")] + { + let old_backend = self.backend; + self.backend = DatabaseBackend::Mdbx; + DatabaseBackendOverride::Success(old_backend) + } + #[cfg(not(feature = "mdbx"))] + { + DatabaseBackendOverride::Failure(mdbx_path) + } + } else { + DatabaseBackendOverride::Noop + } + } } diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index 132ce8b235..45cbef84f2 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -21,7 +21,7 @@ pub use crate::slasher::Slasher; pub use attestation_queue::{AttestationBatch, AttestationQueue, SimpleBatch}; pub use attester_record::{AttesterRecord, CompactAttesterRecord, IndexedAttesterRecord}; pub use block_queue::BlockQueue; -pub use config::{Config, DatabaseBackend}; +pub use config::{Config, DatabaseBackend, DatabaseBackendOverride}; pub use database::{ interface::{Database, Environment, RwTransaction}, IndexedAttestationId, SlasherDB, diff --git a/slasher/tests/backend.rs b/slasher/tests/backend.rs new file mode 100644 index 0000000000..9e68107de7 --- /dev/null +++ b/slasher/tests/backend.rs @@ -0,0 +1,57 @@ +#![cfg(all(feature = "lmdb"))] + +use slasher::{config::MDBX_DATA_FILENAME, Config, DatabaseBackend, DatabaseBackendOverride}; +use std::fs::File; +use tempfile::tempdir; + +#[test] +#[cfg(all(feature = "mdbx", feature = "lmdb"))] +fn override_no_existing_db() { + let tempdir = tempdir().unwrap(); + let mut config = 
Config::new(tempdir.path().into()); + assert_eq!(config.override_backend(), DatabaseBackendOverride::Noop); +} + +#[test] +#[cfg(all(feature = "mdbx", feature = "lmdb"))] +fn override_with_existing_mdbx_db() { + let tempdir = tempdir().unwrap(); + let mut config = Config::new(tempdir.path().into()); + + File::create(config.database_path.join(MDBX_DATA_FILENAME)).unwrap(); + + assert_eq!( + config.override_backend(), + DatabaseBackendOverride::Success(DatabaseBackend::Lmdb) + ); + assert_eq!(config.backend, DatabaseBackend::Mdbx); +} + +#[test] +#[cfg(all(feature = "mdbx", feature = "lmdb"))] +fn no_override_with_existing_mdbx_db() { + let tempdir = tempdir().unwrap(); + let mut config = Config::new(tempdir.path().into()); + config.backend = DatabaseBackend::Mdbx; + + File::create(config.database_path.join(MDBX_DATA_FILENAME)).unwrap(); + + assert_eq!(config.override_backend(), DatabaseBackendOverride::Noop); + assert_eq!(config.backend, DatabaseBackend::Mdbx); +} + +#[test] +#[cfg(all(not(feature = "mdbx"), feature = "lmdb"))] +fn failed_override_with_existing_mdbx_db() { + let tempdir = tempdir().unwrap(); + let mut config = Config::new(tempdir.path().into()); + + let filename = config.database_path.join(MDBX_DATA_FILENAME); + File::create(&filename).unwrap(); + + assert_eq!( + config.override_backend(), + DatabaseBackendOverride::Failure(filename) + ); + assert_eq!(config.backend, DatabaseBackend::Lmdb); +} diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 6095e1be6b..31542ba447 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -6,9 +6,9 @@ use crate::type_name; use crate::type_name::TypeName; use serde_derive::Deserialize; use state_processing::per_epoch_processing::capella::process_historical_summaries_update; +use state_processing::per_epoch_processing::effective_balance_updates::process_effective_balance_updates; use 
state_processing::per_epoch_processing::{ altair, base, - effective_balance_updates::process_effective_balance_updates, historical_roots_update::process_historical_roots_update, process_registry_updates, process_slashings, resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, @@ -173,7 +173,7 @@ impl EpochTransition for Eth1DataReset { impl EpochTransition for EffectiveBalanceUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - process_effective_balance_updates(state, spec) + process_effective_balance_updates(state, None, spec) } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 4f5d998301..9627d2cde0 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,7 +7,7 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, CountUnrealized, NotifyExecutionLayer, + BeaconChainTypes, CachedHead, NotifyExecutionLayer, }; use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; use serde::Deserialize; @@ -18,7 +18,8 @@ use std::sync::Arc; use std::time::Duration; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec, - ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, + ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, ProgressiveBalancesMode, + SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -381,8 +382,8 @@ impl Tester { let result = self.block_on_dangerous(self.harness.chain.process_block( block_root, block.clone(), - CountUnrealized::True, NotifyExecutionLayer::Yes, + || Ok(()), ))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( @@ -440,8 +441,9 @@ impl Tester { 
block_delay, &state, PayloadVerificationStatus::Irrelevant, + ProgressiveBalancesMode::Strict, &self.harness.chain.spec, - CountUnrealized::True, + self.harness.logger(), ); if result.is_ok() { diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 5fd00285aa..21a56dcf2a 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -4,6 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; use serde_derive::Deserialize; +use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, @@ -96,6 +97,7 @@ impl Operation for Attestation { spec, ), BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + initialize_progressive_balances_cache(state, None, spec)?; altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec) } } @@ -118,6 +120,7 @@ impl Operation for AttesterSlashing { _: &Operations, ) -> Result<(), BlockProcessingError> { let mut ctxt = ConsensusContext::new(state.slot()); + initialize_progressive_balances_cache(state, None, spec)?; process_attester_slashings( state, &[self.clone()], @@ -168,6 +171,7 @@ impl Operation for ProposerSlashing { _: &Operations, ) -> Result<(), BlockProcessingError> { let mut ctxt = ConsensusContext::new(state.slot()); + initialize_progressive_balances_cache(state, None, spec)?; process_proposer_slashings( state, &[self.clone()], diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index e51fed1907..191b45c33a 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -67,7 +67,7 @@ impl Case for SanityBlocks { let spec = 
&testing_spec::(fork_name); // Processing requires the epoch cache. - bulk_state.build_all_caches(spec).unwrap(); + bulk_state.build_caches(spec).unwrap(); // Spawning a second state to call the VerifyIndiviual strategy to avoid bitrot. // See https://github.com/sigp/lighthouse/issues/742. diff --git a/testing/ef_tests/src/cases/sanity_slots.rs b/testing/ef_tests/src/cases/sanity_slots.rs index a38a8930a0..dd385d13f4 100644 --- a/testing/ef_tests/src/cases/sanity_slots.rs +++ b/testing/ef_tests/src/cases/sanity_slots.rs @@ -61,7 +61,7 @@ impl Case for SanitySlots { let spec = &testing_spec::(fork_name); // Processing requires the epoch cache. - state.build_all_caches(spec).unwrap(); + state.build_caches(spec).unwrap(); let mut result = (0..self.slots) .try_for_each(|_| per_slot_processing(&mut state, None, spec).map(|_| ())) diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index 08766f14fc..5c78c09022 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -6,8 +6,11 @@ edition = "2021" [dependencies] tokio = { version = "1.14.0", features = ["time"] } -web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } +ethers-core = "1.0.2" +ethers-providers = "1.0.2" +ethers-contract = "1.0.2" types = { path = "../../consensus/types"} serde_json = "1.0.58" deposit_contract = { path = "../../common/deposit_contract"} unused_port = { path = "../../common/unused_port" } +hex = "0.4.2" diff --git a/testing/eth1_test_rig/src/anvil.rs b/testing/eth1_test_rig/src/anvil.rs new file mode 100644 index 0000000000..1b86711c2f --- /dev/null +++ b/testing/eth1_test_rig/src/anvil.rs @@ -0,0 +1,101 @@ +use ethers_core::utils::{Anvil, AnvilInstance}; +use ethers_providers::{Http, Middleware, Provider}; +use serde_json::json; +use std::convert::TryFrom; +use unused_port::unused_tcp4_port; + +/// Provides a dedicated `anvil` instance. 
+/// +/// Requires that `anvil` is installed and available on `PATH`. +pub struct AnvilCliInstance { + pub port: u16, + pub anvil: AnvilInstance, + pub client: Provider, + chain_id: u64, +} + +impl AnvilCliInstance { + fn new_from_child(anvil_instance: Anvil, chain_id: u64, port: u16) -> Result { + let client = Provider::::try_from(&endpoint(port)) + .map_err(|e| format!("Failed to start HTTP transport connected to anvil: {:?}", e))?; + Ok(Self { + port, + anvil: anvil_instance.spawn(), + client, + chain_id, + }) + } + pub fn new(chain_id: u64) -> Result { + let port = unused_tcp4_port()?; + + let anvil = Anvil::new() + .port(port) + .mnemonic("vast thought differ pull jewel broom cook wrist tribe word before omit") + .arg("--balance") + .arg("1000000000") + .arg("--gas-limit") + .arg("1000000000") + .arg("--accounts") + .arg("10") + .arg("--chain-id") + .arg(format!("{}", chain_id)); + + Self::new_from_child(anvil, chain_id, port) + } + + pub fn fork(&self) -> Result { + let port = unused_tcp4_port()?; + + let anvil = Anvil::new() + .port(port) + .arg("--chain-id") + .arg(format!("{}", self.chain_id())) + .fork(self.endpoint()); + + Self::new_from_child(anvil, self.chain_id, port) + } + + /// Returns the endpoint that this instance is listening on. + pub fn endpoint(&self) -> String { + endpoint(self.port) + } + + /// Returns the chain id of the anvil instance + pub fn chain_id(&self) -> u64 { + self.chain_id + } + + /// Increase the timestamp on future blocks by `increase_by` seconds. 
+ pub async fn increase_time(&self, increase_by: u64) -> Result<(), String> { + self.client + .request("evm_increaseTime", vec![json!(increase_by)]) + .await + .map(|_json_value: u64| ()) + .map_err(|e| format!("Failed to increase time on EVM (is this anvil?): {:?}", e)) + } + + /// Returns the current block number, as u64 + pub async fn block_number(&self) -> Result { + self.client + .get_block_number() + .await + .map(|v| v.as_u64()) + .map_err(|e| format!("Failed to get block number: {:?}", e)) + } + + /// Mines a single block. + pub async fn evm_mine(&self) -> Result<(), String> { + self.client + .request("evm_mine", ()) + .await + .map(|_: String| ()) + .map_err(|_| { + "utils should mine new block with evm_mine (only works with anvil/ganache!)" + .to_string() + }) + } +} + +fn endpoint(port: u16) -> String { + format!("http://127.0.0.1:{}", port) +} diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs deleted file mode 100644 index 898a089ba0..0000000000 --- a/testing/eth1_test_rig/src/ganache.rs +++ /dev/null @@ -1,193 +0,0 @@ -use serde_json::json; -use std::io::prelude::*; -use std::io::BufReader; -use std::process::{Child, Command, Stdio}; -use std::time::{Duration, Instant}; -use unused_port::unused_tcp4_port; -use web3::{transports::Http, Transport, Web3}; - -/// How long we will wait for ganache to indicate that it is ready. -const GANACHE_STARTUP_TIMEOUT_MILLIS: u64 = 10_000; - -/// Provides a dedicated `ganachi-cli` instance with a connected `Web3` instance. -/// -/// Requires that `ganachi-cli` is installed and available on `PATH`. 
-pub struct GanacheInstance { - pub port: u16, - child: Child, - pub web3: Web3, - chain_id: u64, -} - -impl GanacheInstance { - fn new_from_child(mut child: Child, port: u16, chain_id: u64) -> Result { - let stdout = child - .stdout - .ok_or("Unable to get stdout for ganache child process")?; - - let start = Instant::now(); - let mut reader = BufReader::new(stdout); - loop { - if start + Duration::from_millis(GANACHE_STARTUP_TIMEOUT_MILLIS) <= Instant::now() { - break Err( - "Timed out waiting for ganache to start. Is ganache installed?".to_string(), - ); - } - - let mut line = String::new(); - if let Err(e) = reader.read_line(&mut line) { - break Err(format!("Failed to read line from ganache process: {:?}", e)); - } else if line.starts_with("RPC Listening on") { - break Ok(()); - } else { - continue; - } - }?; - - let transport = Http::new(&endpoint(port)).map_err(|e| { - format!( - "Failed to start HTTP transport connected to ganache: {:?}", - e - ) - })?; - let web3 = Web3::new(transport); - - child.stdout = Some(reader.into_inner()); - - Ok(Self { - port, - child, - web3, - chain_id, - }) - } - - /// Start a new `ganache` process, waiting until it indicates that it is ready to accept - /// RPC connections. - pub fn new(chain_id: u64) -> Result { - let port = unused_tcp4_port()?; - let binary = match cfg!(windows) { - true => "ganache.cmd", - false => "ganache", - }; - let child = Command::new(binary) - .stdout(Stdio::piped()) - .arg("--defaultBalanceEther") - .arg("1000000000") - .arg("--gasLimit") - .arg("1000000000") - .arg("--accounts") - .arg("10") - .arg("--port") - .arg(format!("{}", port)) - .arg("--mnemonic") - .arg("\"vast thought differ pull jewel broom cook wrist tribe word before omit\"") - .arg("--chain.chainId") - .arg(format!("{}", chain_id)) - .spawn() - .map_err(|e| { - format!( - "Failed to start {}. \ - Is it installed and available on $PATH? 
Error: {:?}", - binary, e - ) - })?; - - Self::new_from_child(child, port, chain_id) - } - - pub fn fork(&self) -> Result { - let port = unused_tcp4_port()?; - let binary = match cfg!(windows) { - true => "ganache.cmd", - false => "ganache", - }; - let child = Command::new(binary) - .stdout(Stdio::piped()) - .arg("--fork") - .arg(self.endpoint()) - .arg("--port") - .arg(format!("{}", port)) - .arg("--chain.chainId") - .arg(format!("{}", self.chain_id)) - .spawn() - .map_err(|e| { - format!( - "Failed to start {}. \ - Is it installed and available on $PATH? Error: {:?}", - binary, e - ) - })?; - - Self::new_from_child(child, port, self.chain_id) - } - - /// Returns the endpoint that this instance is listening on. - pub fn endpoint(&self) -> String { - endpoint(self.port) - } - - /// Returns the chain id of the ganache instance - pub fn chain_id(&self) -> u64 { - self.chain_id - } - - /// Increase the timestamp on future blocks by `increase_by` seconds. - pub async fn increase_time(&self, increase_by: u64) -> Result<(), String> { - self.web3 - .transport() - .execute("evm_increaseTime", vec![json!(increase_by)]) - .await - .map(|_json_value| ()) - .map_err(|e| format!("Failed to increase time on EVM (is this ganache?): {:?}", e)) - } - - /// Returns the current block number, as u64 - pub async fn block_number(&self) -> Result { - self.web3 - .eth() - .block_number() - .await - .map(|v| v.as_u64()) - .map_err(|e| format!("Failed to get block number: {:?}", e)) - } - - /// Mines a single block. 
- pub async fn evm_mine(&self) -> Result<(), String> { - self.web3 - .transport() - .execute("evm_mine", vec![]) - .await - .map(|_| ()) - .map_err(|_| { - "utils should mine new block with evm_mine (only works with ganache!)".to_string() - }) - } -} - -fn endpoint(port: u16) -> String { - format!("http://127.0.0.1:{}", port) -} - -impl Drop for GanacheInstance { - fn drop(&mut self) { - if cfg!(windows) { - // Calling child.kill() in Windows will only kill the process - // that spawned ganache, leaving the actual ganache process - // intact. You have to kill the whole process tree. What's more, - // if you don't spawn ganache with --keepAliveTimeout=0, Windows - // will STILL keep the server running even after you've ended - // the process tree and it's disappeared from the task manager. - // Unbelievable... - Command::new("taskkill") - .arg("/pid") - .arg(self.child.id().to_string()) - .arg("/T") - .arg("/F") - .output() - .expect("failed to execute taskkill"); - } else { - let _ = self.child.kill(); - } - } -} diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 42081a60e7..0063975ee1 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -1,77 +1,79 @@ //! Provides utilities for deploying and manipulating the eth2 deposit contract on the eth1 chain. //! -//! Presently used with [`ganache`](https://github.com/trufflesuite/ganache) to simulate +//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil) to simulate //! the deposit contract for testing beacon node eth1 integration. //! //! Not tested to work with actual clients (e.g., geth). It should work fine, however there may be //! some initial issues. 
-mod ganache; +mod anvil; +use anvil::AnvilCliInstance; use deposit_contract::{ encode_eth1_tx_data, testnet, ABI, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, }; -use ganache::GanacheInstance; +use ethers_contract::Contract; +use ethers_core::{ + abi::Abi, + types::{transaction::eip2718::TypedTransaction, Address, Bytes, TransactionRequest, U256}, +}; +pub use ethers_providers::{Http, Middleware, Provider}; use std::time::Duration; use tokio::time::sleep; use types::DepositData; use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256, Keypair, Signature}; -use web3::contract::{Contract, Options}; -use web3::transports::Http; -use web3::types::{Address, TransactionRequest, U256}; -use web3::Web3; pub const DEPLOYER_ACCOUNTS_INDEX: usize = 0; pub const DEPOSIT_ACCOUNTS_INDEX: usize = 0; -/// Provides a dedicated ganache instance with the deposit contract already deployed. -pub struct GanacheEth1Instance { - pub ganache: GanacheInstance, +/// Provides a dedicated anvil instance with the deposit contract already deployed. +pub struct AnvilEth1Instance { + pub anvil: AnvilCliInstance, pub deposit_contract: DepositContract, } -impl GanacheEth1Instance { +impl AnvilEth1Instance { pub async fn new(chain_id: u64) -> Result { - let ganache = GanacheInstance::new(chain_id)?; - DepositContract::deploy(ganache.web3.clone(), 0, None) + let anvil = AnvilCliInstance::new(chain_id)?; + DepositContract::deploy(anvil.client.clone(), 0, None) .await .map(|deposit_contract| Self { - ganache, + anvil, deposit_contract, }) } pub fn endpoint(&self) -> String { - self.ganache.endpoint() + self.anvil.endpoint() } - pub fn web3(&self) -> Web3 { - self.ganache.web3.clone() + pub fn json_rpc_client(&self) -> Provider { + self.anvil.client.clone() } } /// Deploys and provides functions for the eth2 deposit contract, deployed on the eth1 chain. 
#[derive(Clone, Debug)] pub struct DepositContract { - web3: Web3, - contract: Contract, + client: Provider, + contract: Contract>, } impl DepositContract { pub async fn deploy( - web3: Web3, + client: Provider, confirmations: usize, password: Option, ) -> Result { - Self::deploy_bytecode(web3, confirmations, BYTECODE, ABI, password).await + Self::deploy_bytecode(client, confirmations, BYTECODE, ABI, password).await } pub async fn deploy_testnet( - web3: Web3, + client: Provider, confirmations: usize, password: Option, ) -> Result { Self::deploy_bytecode( - web3, + client, confirmations, testnet::BYTECODE, testnet::ABI, @@ -81,29 +83,25 @@ impl DepositContract { } async fn deploy_bytecode( - web3: Web3, + client: Provider, confirmations: usize, bytecode: &[u8], abi: &[u8], password: Option, ) -> Result { - let address = deploy_deposit_contract( - web3.clone(), - confirmations, - bytecode.to_vec(), - abi.to_vec(), - password, - ) - .await - .map_err(|e| { - format!( - "Failed to deploy contract: {}. Is scripts/ganache_tests_node.sh running?.", - e - ) - })?; - Contract::from_json(web3.clone().eth(), address, ABI) - .map_err(|e| format!("Failed to init contract: {:?}", e)) - .map(move |contract| Self { web3, contract }) + let abi = Abi::load(abi).map_err(|e| format!("Invalid deposit contract abi: {:?}", e))?; + let address = + deploy_deposit_contract(client.clone(), confirmations, bytecode.to_vec(), password) + .await + .map_err(|e| { + format!( + "Failed to deploy contract: {}. Is scripts/anvil_tests_node.sh running?.", + e + ) + })?; + + let contract = Contract::new(address, abi, client.clone()); + Ok(Self { client, contract }) } /// The deposit contract's address in `0x00ab...` format. @@ -178,9 +176,8 @@ impl DepositContract { /// Performs a non-blocking deposit. 
pub async fn deposit_async(&self, deposit_data: DepositData) -> Result<(), String> { let from = self - .web3 - .eth() - .accounts() + .client + .get_accounts() .await .map_err(|e| format!("Failed to get accounts: {:?}", e)) .and_then(|accounts| { @@ -189,32 +186,33 @@ impl DepositContract { .cloned() .ok_or_else(|| "Insufficient accounts for deposit".to_string()) })?; - let tx_request = TransactionRequest { - from, - to: Some(self.contract.address()), - gas: Some(U256::from(DEPOSIT_GAS)), - gas_price: None, - max_fee_per_gas: None, - max_priority_fee_per_gas: None, - value: Some(from_gwei(deposit_data.amount)), - // Note: the reason we use this `TransactionRequest` instead of just using the - // function in `self.contract` is so that the `eth1_tx_data` function gets used - // during testing. - // - // It's important that `eth1_tx_data` stays correct and does not suffer from - // code-rot. - data: encode_eth1_tx_data(&deposit_data).map(Into::into).ok(), - nonce: None, - condition: None, - transaction_type: None, - access_list: None, - }; + // Note: the reason we use this `TransactionRequest` instead of just using the + // function in `self.contract` is so that the `eth1_tx_data` function gets used + // during testing. + // + // It's important that `eth1_tx_data` stays correct and does not suffer from + // code-rot. + let tx_request = TransactionRequest::new() + .from(from) + .to(self.contract.address()) + .gas(DEPOSIT_GAS) + .value(from_gwei(deposit_data.amount)) + .data(Bytes::from(encode_eth1_tx_data(&deposit_data).map_err( + |e| format!("Failed to encode deposit data: {:?}", e), + )?)); - self.web3 - .eth() - .send_transaction(tx_request) + let pending_tx = self + .client + .send_transaction(tx_request, None) .await .map_err(|e| format!("Failed to call deposit fn: {:?}", e))?; + + pending_tx + .interval(Duration::from_millis(10)) + .confirmations(0) + .await + .map_err(|e| format!("Transaction failed to resolve: {:?}", e))? 
+ .ok_or_else(|| "Transaction dropped from mempool".to_string())?; Ok(()) } @@ -245,17 +243,13 @@ fn from_gwei(gwei: u64) -> U256 { /// Deploys the deposit contract to the given web3 instance using the account with index /// `DEPLOYER_ACCOUNTS_INDEX`. async fn deploy_deposit_contract( - web3: Web3, + client: Provider, confirmations: usize, bytecode: Vec, - abi: Vec, password_opt: Option, ) -> Result { - let bytecode = String::from_utf8(bytecode).expect("bytecode must be valid utf8"); - - let from_address = web3 - .eth() - .accounts() + let from_address = client + .get_accounts() .await .map_err(|e| format!("Failed to get accounts: {:?}", e)) .and_then(|accounts| { @@ -266,30 +260,42 @@ async fn deploy_deposit_contract( })?; let deploy_address = if let Some(password) = password_opt { - let result = web3 - .personal() - .unlock_account(from_address, &password, None) + let result = client + .request( + "personal_unlockAccount", + vec![from_address.to_string(), password], + ) .await; + match result { - Ok(true) => return Ok(from_address), + Ok(true) => from_address, Ok(false) => return Err("Eth1 node refused to unlock account".to_string()), Err(e) => return Err(format!("Eth1 unlock request failed: {:?}", e)), - }; + } } else { from_address }; - let pending_contract = Contract::deploy(web3.eth(), &abi) - .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? 
- .confirmations(confirmations) - .options(Options { - gas: Some(U256::from(CONTRACT_DEPLOY_GAS)), - ..Options::default() - }) - .execute(bytecode, (), deploy_address); + let mut bytecode = String::from_utf8(bytecode).unwrap(); + bytecode.retain(|c| c.is_ascii_hexdigit()); + let bytecode = hex::decode(&bytecode[1..]).unwrap(); - pending_contract + let deploy_tx: TypedTransaction = TransactionRequest::new() + .from(deploy_address) + .data(Bytes::from(bytecode)) + .gas(CONTRACT_DEPLOY_GAS) + .into(); + + let pending_tx = client + .send_transaction(deploy_tx, None) .await - .map(|contract| contract.address()) - .map_err(|e| format!("Unable to resolve pending contract: {:?}", e)) + .map_err(|e| format!("Failed to send tx: {:?}", e))?; + + let tx = pending_tx + .interval(Duration::from_millis(500)) + .confirmations(confirmations) + .await + .map_err(|e| format!("Failed to fetch tx receipt: {:?}", e))?; + tx.and_then(|tx| tx.contract_address) + .ok_or_else(|| "Deposit contract not deployed successfully".to_string()) } diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 485485c6fe..8925f1cc84 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -11,7 +11,7 @@ use unused_port::unused_tcp4_port; /// We've pinned the Nethermind version since our method of using the `master` branch to /// find the latest tag isn't working. It appears Nethermind don't always tag on `master`. /// We should fix this so we always pull the latest version of Nethermind. 
-const NETHERMIND_BRANCH: &str = "release/1.17.1"; +const NETHERMIND_BRANCH: &str = "release/1.18.2"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 9668ee8cb4..5dc2d5ec84 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -10,7 +10,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .about( "Lighthouse Beacon Chain Simulator creates `n` beacon node and validator clients, \ each with `v` validators. A deposit contract is deployed at the start of the \ - simulation using a local `ganache` instance (you must have `ganache` \ + simulation using a local `anvil` instance (you must have `anvil` \ installed and avaliable on your path). All beacon nodes independently listen \ for genesis from the deposit contract, then start operating. \ \ diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 1699c0e9ee..3e764d27d0 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -2,7 +2,7 @@ use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; -use eth1_test_rig::GanacheEth1Instance; +use eth1_test_rig::AnvilEth1Instance; use execution_layer::http::deposit_methods::Eth1Id; use futures::prelude::*; @@ -72,6 +72,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_number: 0, compression: false, is_restricted: true, + sse_logging: false, })? .multi_threaded_tokio_runtime()? .build()?; @@ -110,12 +111,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit * validators. 
*/ - let ganache_eth1_instance = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; - let deposit_contract = ganache_eth1_instance.deposit_contract; - let chain_id = ganache_eth1_instance.ganache.chain_id(); - let ganache = ganache_eth1_instance.ganache; - let eth1_endpoint = SensitiveUrl::parse(ganache.endpoint().as_str()) - .expect("Unable to parse ganache endpoint."); + let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; + let deposit_contract = anvil_eth1_instance.deposit_contract; + let chain_id = anvil_eth1_instance.anvil.chain_id(); + let anvil = anvil_eth1_instance.anvil; + let eth1_endpoint = SensitiveUrl::parse(anvil.endpoint().as_str()) + .expect("Unable to parse anvil endpoint."); let deposit_contract_address = deposit_contract.address(); // Start a timer that produces eth1 blocks on an interval. @@ -123,7 +124,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let mut interval = tokio::time::interval(eth1_block_time); loop { interval.tick().await; - let _ = ganache.evm_mine().await; + let _ = anvil.evm_mine().await; } }); diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index 922149537c..a19777c5ab 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -1,6 +1,6 @@ //! This crate provides a simluation that creates `n` beacon node and validator clients, each with //! `v` validators. A deposit contract is deployed at the start of the simulation using a local -//! `ganache` instance (you must have `ganache` installed and avaliable on your path). All +//! `anvil` instance (you must have `anvil` installed and avaliable on your path). All //! beacon nodes independently listen for genesis from the deposit contract, then start operating. //! //! 
As the simulation runs, there are checks made to ensure that all components are running diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index b7598f9fa7..fc18b1cd48 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -54,6 +54,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_number: 0, compression: false, is_restricted: true, + sse_logging: false, })? .multi_threaded_tokio_runtime()? .build()?; diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 5eaed809df..78f7e1ee9f 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -58,6 +58,7 @@ fn syncing_sim( max_log_number: 0, compression: false, is_restricted: true, + sse_logging: false, })? .multi_threaded_tokio_runtime()? .build()?; diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 4095a20470..200db73167 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -24,6 +24,7 @@ bincode = "1.3.1" serde_json = "1.0.58" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } tokio = { version = "1.14.0", features = ["time"] } +tokio-stream = { version = "0.1.3", features = ["sync"] } futures = "0.3.7" dirs = "3.0.1" directory = { path = "../common/directory" } diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 3e667429b4..531cec08ac 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -28,7 +28,7 @@ const UPDATE_REQUIRED_LOG_HINT: &str = "this VC or the remote BN may need updati /// too early, we risk switching nodes between the time of publishing an attestation and publishing /// an aggregate; this may result in a missed aggregation. 
If we set this time too late, we risk not /// having the correct nodes up and running prior to the start of the slot. -const SLOT_LOOKAHEAD: Duration = Duration::from_secs(1); +const SLOT_LOOKAHEAD: Duration = Duration::from_secs(2); /// Indicates a measurement of latency between the VC and a BN. pub struct LatencyMeasurement { @@ -52,7 +52,7 @@ pub fn start_fallback_updater_service( let future = async move { loop { - beacon_nodes.update_unready_candidates().await; + beacon_nodes.update_all_candidates().await; let sleep_time = beacon_nodes .slot_clock @@ -182,7 +182,10 @@ impl CandidateBeaconNode { spec: &ChainSpec, log: &Logger, ) -> Result<(), CandidateError> { - let new_status = if let Err(e) = self.is_online(log).await { + let previous_status = self.status(RequireSynced::Yes).await; + let was_offline = matches!(previous_status, Err(CandidateError::Offline)); + + let new_status = if let Err(e) = self.is_online(was_offline, log).await { Err(e) } else if let Err(e) = self.is_compatible(spec, log).await { Err(e) @@ -202,7 +205,7 @@ impl CandidateBeaconNode { } /// Checks if the node is reachable. - async fn is_online(&self, log: &Logger) -> Result<(), CandidateError> { + async fn is_online(&self, was_offline: bool, log: &Logger) -> Result<(), CandidateError> { let result = self .beacon_node .get_node_version() @@ -211,12 +214,14 @@ impl CandidateBeaconNode { match result { Ok(version) => { - info!( - log, - "Connected to beacon node"; - "version" => version, - "endpoint" => %self.beacon_node, - ); + if was_offline { + info!( + log, + "Connected to beacon node"; + "version" => version, + "endpoint" => %self.beacon_node, + ); + } Ok(()) } Err(e) => { @@ -385,33 +390,21 @@ impl BeaconNodeFallback { n } - /// Loop through any `self.candidates` that we don't think are online, compatible or synced and - /// poll them to see if their status has changed. + /// Loop through ALL candidates in `self.candidates` and update their sync status. 
/// - /// We do not poll nodes that are synced to avoid sending additional requests when everything is - /// going smoothly. - pub async fn update_unready_candidates(&self) { - let mut futures = Vec::new(); - for candidate in &self.candidates { - // There is a potential race condition between having the read lock and the write - // lock. The worst case of this race is running `try_become_ready` twice, which is - // acceptable. - // - // Note: `RequireSynced` is always set to false here. This forces us to recheck the sync - // status of nodes that were previously not-synced. - if candidate.status(RequireSynced::Yes).await.is_err() { - // There exists a race-condition that could result in `refresh_status` being called - // when the status does not require refreshing anymore. This is deemed an - // acceptable inefficiency. - futures.push(candidate.refresh_status( - self.slot_clock.as_ref(), - &self.spec, - &self.log, - )); - } - } + /// It is possible for a node to return an unsynced status while continuing to serve + /// low quality responses. To route around this it's best to poll all connected beacon nodes. + /// A previous implementation of this function polled only the unavailable BNs. 
+ pub async fn update_all_candidates(&self) { + let futures = self + .candidates + .iter() + .map(|candidate| { + candidate.refresh_status(self.slot_clock.as_ref(), &self.spec, &self.log) + }) + .collect::>(); - //run all updates concurrently and ignore results + // run all updates concurrently and ignore errors let _ = future::join_all(futures).await; } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 61a5a094cd..d22e6c95f3 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -338,35 +338,61 @@ impl BlockService { let log = log.clone(); self.inner.context.executor.spawn( async move { - let publish_result = if builder_proposals { - let mut result = service.clone() + if builder_proposals { + let result = service + .clone() .publish_block::>(slot, validator_pubkey) .await; - match result.as_ref() { + match result { Err(BlockError::Recoverable(e)) => { - error!(log, "Error whilst producing a blinded block, attempting to \ - publish full block"; "error" => ?e); - result = service + error!( + log, + "Error whilst producing block"; + "error" => ?e, + "block_slot" => ?slot, + "info" => "blinded proposal failed, attempting full block" + ); + if let Err(e) = service .publish_block::>(slot, validator_pubkey) - .await; - }, - Err(BlockError::Irrecoverable(e)) => { - error!(log, "Error whilst producing a blinded block, cannot fallback \ - because the block was signed"; "error" => ?e); - }, - _ => {}, + .await + { + // Log a `crit` since a full block + // (non-builder) proposal failed. + crit!( + log, + "Error whilst producing block"; + "error" => ?e, + "block_slot" => ?slot, + "info" => "full block attempted after a blinded failure", + ); + } + } + Err(BlockError::Irrecoverable(e)) => { + // Only log an `error` since it's common for + // builders to timeout on their response, only + // to publish the block successfully themselves. 
+ error!( + log, + "Error whilst producing block"; + "error" => ?e, + "block_slot" => ?slot, + "info" => "this error may or may not result in a missed block", + ) + } + Ok(_) => {} }; - result - } else { - service - .publish_block::>(slot, validator_pubkey) - .await - }; - if let Err(e) = publish_result { + } else if let Err(e) = service + .publish_block::>(slot, validator_pubkey) + .await + { + // Log a `crit` since a full block (non-builder) + // proposal failed. crit!( log, "Error whilst producing block"; - "message" => ?e + "message" => ?e, + "block_slot" => ?slot, + "info" => "proposal did not use a builder", ); } }, diff --git a/validator_client/src/check_synced.rs b/validator_client/src/check_synced.rs index c31457e288..fb88d33dae 100644 --- a/validator_client/src/check_synced.rs +++ b/validator_client/src/check_synced.rs @@ -36,7 +36,10 @@ pub async fn check_synced( } }; - let is_synced = !resp.data.is_syncing || (resp.data.sync_distance.as_u64() < SYNC_TOLERANCE); + // Default EL status to "online" for backwards-compatibility with BNs that don't include it. 
+ let el_offline = resp.data.el_offline.unwrap_or(false); + let bn_is_synced = !resp.data.is_syncing || (resp.data.sync_distance.as_u64() < SYNC_TOLERANCE); + let is_synced = bn_is_synced && !el_offline; if let Some(log) = log_opt { if !is_synced { @@ -52,6 +55,7 @@ pub async fn check_synced( "sync_distance" => resp.data.sync_distance.as_u64(), "head_slot" => resp.data.head_slot.as_u64(), "endpoint" => %beacon_node, + "el_offline" => el_offline, ); } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 5abc211d83..728357ccc6 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -109,10 +109,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("allow-unsynced") .long("allow-unsynced") - .help( - "If present, the validator client will still poll for duties if the beacon - node is not synced.", - ), + .help("DEPRECATED: this flag does nothing"), ) .arg( Arg::with_name("use-long-timeouts") @@ -358,6 +355,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("true") .takes_value(true), ) + .arg( + Arg::with_name("validator-registration-batch-size") + .long("validator-registration-batch-size") + .value_name("INTEGER") + .help("Defines the number of validators per \ + validator/register_validator request sent to the BN. This value \ + can be reduced to avoid timeouts from builders.") + .default_value("500") + .takes_value(true), + ) /* * Experimental/development options. */ diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 1330b26bde..7c662db937 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -77,6 +77,8 @@ pub struct Config { pub disable_run_on_all: bool, /// Enables a service which attempts to measure latency between the VC and BNs. pub enable_latency_measurement_service: bool, + /// Defines the number of validators per `validator/register_validator` request sent to the BN. 
+ pub validator_registration_batch_size: usize, } impl Default for Config { @@ -117,6 +119,7 @@ impl Default for Config { gas_limit: None, disable_run_on_all: false, enable_latency_measurement_service: true, + validator_registration_batch_size: 500, } } } @@ -205,7 +208,13 @@ impl Config { ); } - config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced"); + if cli_args.is_present("allow-unsynced") { + warn!( + log, + "The --allow-unsynced flag is deprecated"; + "msg" => "it no longer has any effect", + ); + } config.disable_run_on_all = cli_args.is_present("disable-run-on-all"); config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); config.init_slashing_protection = cli_args.is_present("init-slashing-protection"); @@ -382,6 +391,12 @@ impl Config { config.enable_latency_measurement_service = parse_optional(cli_args, "latency-measurement-service")?.unwrap_or(true); + config.validator_registration_batch_size = + parse_required(cli_args, "validator-registration-batch-size")?; + if config.validator_registration_batch_size == 0 { + return Err("validator-registration-batch-size cannot be 0".to_string()); + } + /* * Experimental */ diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 3cab6e7821..83cdb936aa 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -147,11 +147,6 @@ pub struct DutiesService { pub slot_clock: T, /// Provides HTTP access to remote beacon nodes. pub beacon_nodes: Arc>, - /// Controls whether or not this function will refuse to interact with non-synced beacon nodes. - /// - /// This functionality is a little redundant since most BNs will likely reject duties when they - /// aren't synced, but we keep it around for an emergency. 
- pub require_synced: RequireSynced, pub enable_high_validator_count_metrics: bool, pub context: RuntimeContext, pub spec: ChainSpec, @@ -421,7 +416,7 @@ async fn poll_validator_indices( let download_result = duties_service .beacon_nodes .first_success( - duties_service.require_synced, + RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { let _timer = metrics::start_timer_vec( @@ -618,7 +613,7 @@ async fn poll_beacon_attesters( if let Err(e) = duties_service .beacon_nodes .run( - duties_service.require_synced, + RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { let _timer = metrics::start_timer_vec( @@ -856,7 +851,7 @@ async fn post_validator_duties_attester( duties_service .beacon_nodes .first_success( - duties_service.require_synced, + RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { let _timer = metrics::start_timer_vec( @@ -1063,7 +1058,7 @@ async fn poll_beacon_proposers( let download_result = duties_service .beacon_nodes .first_success( - duties_service.require_synced, + RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { let _timer = metrics::start_timer_vec( diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index b9d4d70306..7a852091aa 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -1,4 +1,4 @@ -use crate::beacon_node_fallback::OfflineOnFailure; +use crate::beacon_node_fallback::{OfflineOnFailure, RequireSynced}; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, @@ -422,7 +422,7 @@ pub async fn poll_sync_committee_duties_for_period { pub spec: ChainSpec, pub config: Config, pub log: Logger, + pub sse_logging_components: Option, pub slot_clock: T, pub _phantom: PhantomData, } @@ -223,6 +227,10 @@ pub fn serve( let api_token_path_inner = api_token_path.clone(); let api_token_path_filter = warp::any().map(move || 
api_token_path_inner.clone()); + // Filter for SEE Logging events + let inner_components = ctx.sse_logging_components.clone(); + let sse_component_filter = warp::any().map(move || inner_components.clone()); + // Create a `warp` filter that provides access to local system information. let system_info = Arc::new(RwLock::new(sysinfo::System::new())); { @@ -371,7 +379,7 @@ pub fn serve( .and(warp::path("graffiti")) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(graffiti_file_filter) + .and(graffiti_file_filter.clone()) .and(graffiti_flag_filter) .and(signer.clone()) .and(log_filter.clone()) @@ -654,18 +662,27 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_store_filter.clone()) + .and(graffiti_file_filter) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( |validator_pubkey: PublicKey, body: api_types::ValidatorPatchRequest, validator_store: Arc>, + graffiti_file: Option, signer, task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { + if body.graffiti.is_some() && graffiti_file.is_some() { + return Err(warp_utils::reject::custom_bad_request( + "Unable to update graffiti as the \"--graffiti-file\" flag is set" + .to_string(), + )); + } + + let maybe_graffiti = body.graffiti.clone().map(Into::into); let initialized_validators_rw_lock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rw_lock.write(); - match ( initialized_validators.is_enabled(&validator_pubkey), initialized_validators.validator(&validator_pubkey.compress()), @@ -678,7 +695,8 @@ pub fn serve( if Some(is_enabled) == body.enabled && initialized_validator.get_gas_limit() == body.gas_limit && initialized_validator.get_builder_proposals() - == body.builder_proposals => + == body.builder_proposals + && initialized_validator.get_graffiti() == maybe_graffiti => { Ok(()) } @@ -691,6 +709,7 @@ pub fn serve( body.enabled, body.gas_limit, body.builder_proposals, + body.graffiti, 
), ) .map_err(|e| { @@ -1104,6 +1123,49 @@ pub fn serve( }) }); + // Subscribe to get VC logs via Server side events + // /lighthouse/logs + let get_log_events = warp::path("lighthouse") + .and(warp::path("logs")) + .and(warp::path::end()) + .and(sse_component_filter) + .and_then(|sse_component: Option| { + warp_utils::task::blocking_task(move || { + if let Some(logging_components) = sse_component { + // Build a JSON stream + let s = + BroadcastStream::new(logging_components.sender.subscribe()).map(|msg| { + match msg { + Ok(data) => { + // Serialize to json + match data.to_json_string() { + // Send the json as a Server Sent Event + Ok(json) => Event::default().json_data(json).map_err(|e| { + warp_utils::reject::server_sent_event_error(format!( + "{:?}", + e + )) + }), + Err(e) => Err(warp_utils::reject::server_sent_event_error( + format!("Unable to serialize to JSON {}", e), + )), + } + } + Err(e) => Err(warp_utils::reject::server_sent_event_error( + format!("Unable to receive event {}", e), + )), + } + }); + + Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s))) + } else { + Err(warp_utils::reject::custom_server_error( + "SSE Logging is not enabled".to_string(), + )) + } + }) + }); + let routes = warp::any() .and(authorization_header_filter) // Note: it is critical that the `authorization_header_filter` is applied to all routes. @@ -1145,8 +1207,8 @@ pub fn serve( .or(delete_std_remotekeys), )), ) - // The auth route is the only route that is allowed to be accessed without the API token. - .or(warp::get().and(get_auth)) + // The auth route and logs are the only routes that are allowed to be accessed without the API token. + .or(warp::get().and(get_auth.or(get_log_events.boxed()))) // Maps errors into HTTP responses. .recover(warp_utils::reject::handle_rejection) // Add a `Server` header. 
diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 1c593b1a4e..dbb9d4d620 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -28,12 +28,14 @@ use slot_clock::{SlotClock, TestingSlotClock}; use std::future::Future; use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tempfile::{tempdir, TempDir}; use tokio::runtime::Runtime; use tokio::sync::oneshot; +use types::graffiti::GraffitiString; const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); @@ -134,7 +136,8 @@ impl ApiTester { listen_port: 0, allow_origin: None, }, - log: log.clone(), + sse_logging_components: None, + log, slot_clock: slot_clock.clone(), _phantom: PhantomData, }); @@ -532,7 +535,7 @@ impl ApiTester { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None) + .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None, None) .await .unwrap(); @@ -574,7 +577,13 @@ impl ApiTester { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, None, Some(gas_limit), None) + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + Some(gas_limit), + None, + None, + ) .await .unwrap(); @@ -601,6 +610,7 @@ impl ApiTester { None, None, Some(builder_proposals), + None, ) .await .unwrap(); @@ -619,6 +629,34 @@ impl ApiTester { self } + + pub async fn set_graffiti(self, index: usize, graffiti: &str) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let graffiti_str = GraffitiString::from_str(graffiti).unwrap(); + self.client + 
.patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + None, + Some(graffiti_str), + ) + .await + .unwrap(); + + self + } + + pub async fn assert_graffiti(self, index: usize, graffiti: &str) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let graffiti_str = GraffitiString::from_str(graffiti).unwrap(); + assert_eq!( + self.validator_store.graffiti(&validator.voting_pubkey), + Some(graffiti_str.into()) + ); + + self + } } struct HdValidatorScenario { @@ -722,7 +760,13 @@ fn routes_with_invalid_auth() { .await .test_with_invalid_auth(|client| async move { client - .patch_lighthouse_validators(&PublicKeyBytes::empty(), Some(false), None, None) + .patch_lighthouse_validators( + &PublicKeyBytes::empty(), + Some(false), + None, + None, + None, + ) .await }) .await @@ -930,6 +974,41 @@ fn validator_builder_proposals() { }); } +#[test] +fn validator_graffiti() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_graffiti(0, "Mr F was here") + .await + .assert_graffiti(0, "Mr F was here") + .await + // Test setting graffiti while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_graffiti(0, "Mr F was here again") + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_graffiti(0, "Mr F was here again") + .await + }); +} + #[test] fn keystore_validator_creation() { let runtime = build_runtime(); diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index 479451f751..d60872e497 100644 --- 
a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -468,7 +468,7 @@ async fn import_and_delete_conflicting_web3_signer_keystores() { for pubkey in &pubkeys { tester .client - .patch_lighthouse_validators(pubkey, Some(false), None, None) + .patch_lighthouse_validators(pubkey, Some(false), None, None, None) .await .unwrap(); } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 8302edd678..c60fcea90e 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -27,6 +27,7 @@ use std::io::{self, Read}; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; +use types::graffiti::GraffitiString; use types::{Address, Graffiti, Keypair, PublicKey, PublicKeyBytes}; use url::{ParseError, Url}; use validator_dir::Builder as ValidatorDirBuilder; @@ -154,6 +155,10 @@ impl InitializedValidator { pub fn get_index(&self) -> Option { self.index } + + pub fn get_graffiti(&self) -> Option { + self.graffiti + } } fn open_keystore(path: &Path) -> Result { @@ -697,8 +702,8 @@ impl InitializedValidators { self.validators.get(public_key) } - /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`, and `builder_proposals` - /// values. + /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`, + /// `builder_proposals`, and `graffiti` values. /// /// ## Notes /// @@ -708,7 +713,7 @@ impl InitializedValidators { /// /// If a `gas_limit` is included in the call to this function, it will also be updated and saved /// to disk. If `gas_limit` is `None` the `gas_limit` *will not* be unset in `ValidatorDefinition` - /// or `InitializedValidator`. The same logic applies to `builder_proposals`. + /// or `InitializedValidator`. The same logic applies to `builder_proposals` and `graffiti`. 
/// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. pub async fn set_validator_definition_fields( @@ -717,6 +722,7 @@ impl InitializedValidators { enabled: Option, gas_limit: Option, builder_proposals: Option, + graffiti: Option, ) -> Result<(), Error> { if let Some(def) = self .definitions @@ -734,6 +740,9 @@ impl InitializedValidators { if let Some(builder_proposals) = builder_proposals { def.builder_proposals = Some(builder_proposals); } + if let Some(graffiti) = graffiti.clone() { + def.graffiti = Some(graffiti); + } } self.update_validators().await?; @@ -749,6 +758,9 @@ impl InitializedValidators { if let Some(builder_proposals) = builder_proposals { val.builder_proposals = Some(builder_proposals); } + if let Some(graffiti) = graffiti { + val.graffiti = Some(graffiti.into()); + } } self.definitions diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index eca0b88154..f0532f8a6f 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -146,7 +146,7 @@ impl ProductionValidatorClient { context .clone() .executor - .spawn_without_exit(async move { server.await }, "metrics-api"); + .spawn_without_exit(server, "metrics-api"); Some(ctx) } else { @@ -446,11 +446,6 @@ impl ProductionValidatorClient { slot_clock: slot_clock.clone(), beacon_nodes: beacon_nodes.clone(), validator_store: validator_store.clone(), - require_synced: if config.allow_unsynced_beacon_node { - RequireSynced::Yes - } else { - RequireSynced::No - }, spec: context.eth2_config.spec.clone(), context: duties_context, enable_high_validator_count_metrics: config.enable_high_validator_count_metrics, @@ -492,6 +487,7 @@ impl ProductionValidatorClient { .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("preparation".into())) .builder_registration_timestamp_override(config.builder_registration_timestamp_override) + .validator_registration_batch_size(config.validator_registration_batch_size) .build()?; let 
sync_committee_service = SyncCommitteeService::new( @@ -582,6 +578,7 @@ impl ProductionValidatorClient { graffiti_flag: self.config.graffiti, spec: self.context.eth2_config.spec.clone(), config: self.config.http_api.clone(), + sse_logging_components: self.context.sse_logging_components.clone(), slot_clock: self.slot_clock.clone(), log: log.clone(), _phantom: PhantomData, @@ -595,7 +592,7 @@ impl ProductionValidatorClient { self.context .clone() .executor - .spawn_without_exit(async move { server.await }, "http-api"); + .spawn_without_exit(server, "http-api"); Some(listen_addr) } else { @@ -621,8 +618,8 @@ async fn init_from_beacon_node( context: &RuntimeContext, ) -> Result<(u64, Hash256), String> { loop { - beacon_nodes.update_unready_candidates().await; - proposer_nodes.update_unready_candidates().await; + beacon_nodes.update_all_candidates().await; + proposer_nodes.update_all_candidates().await; let num_available = beacon_nodes.num_available().await; let num_total = beacon_nodes.num_total(); diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index fc80f2ded0..7d6e1744c8 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -23,9 +23,6 @@ const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; /// Number of epochs to wait before re-submitting validator registration. const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1; -/// The number of validator registrations to include per request to the beacon node. -const VALIDATOR_REGISTRATION_BATCH_SIZE: usize = 500; - /// Builds an `PreparationService`. 
pub struct PreparationServiceBuilder { validator_store: Option>>, @@ -33,6 +30,7 @@ pub struct PreparationServiceBuilder { beacon_nodes: Option>>, context: Option>, builder_registration_timestamp_override: Option, + validator_registration_batch_size: Option, } impl PreparationServiceBuilder { @@ -43,6 +41,7 @@ impl PreparationServiceBuilder { beacon_nodes: None, context: None, builder_registration_timestamp_override: None, + validator_registration_batch_size: None, } } @@ -74,6 +73,14 @@ impl PreparationServiceBuilder { self } + pub fn validator_registration_batch_size( + mut self, + validator_registration_batch_size: usize, + ) -> Self { + self.validator_registration_batch_size = Some(validator_registration_batch_size); + self + } + pub fn build(self) -> Result, String> { Ok(PreparationService { inner: Arc::new(Inner { @@ -91,6 +98,9 @@ impl PreparationServiceBuilder { .ok_or("Cannot build PreparationService without runtime_context")?, builder_registration_timestamp_override: self .builder_registration_timestamp_override, + validator_registration_batch_size: self.validator_registration_batch_size.ok_or( + "Cannot build PreparationService without validator_registration_batch_size", + )?, validator_registration_cache: RwLock::new(HashMap::new()), }), }) @@ -107,6 +117,7 @@ pub struct Inner { // Used to track unpublished validator registration changes. 
validator_registration_cache: RwLock>, + validator_registration_batch_size: usize, } #[derive(Hash, Eq, PartialEq, Debug, Clone)] @@ -332,7 +343,7 @@ impl PreparationService { match self .beacon_nodes .run( - RequireSynced::Yes, + RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { beacon_node @@ -447,11 +458,11 @@ impl PreparationService { } if !signed.is_empty() { - for batch in signed.chunks(VALIDATOR_REGISTRATION_BATCH_SIZE) { + for batch in signed.chunks(self.validator_registration_batch_size) { match self .beacon_nodes .first_success( - RequireSynced::Yes, + RequireSynced::No, OfflineOnFailure::No, |beacon_node| async move { beacon_node.post_validator_register_validator(batch).await @@ -462,7 +473,7 @@ impl PreparationService { Ok(()) => info!( log, "Published validator registrations to the builder network"; - "count" => registration_data_len, + "count" => batch.len(), ), Err(e) => warn!( log, diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index 3647396ed5..cc20cedfc6 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -178,7 +178,7 @@ impl SyncCommitteeService { let response = self .beacon_nodes .first_success( - RequireSynced::Yes, + RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { match beacon_node.get_beacon_blocks_root(BlockId::Head).await { diff --git a/watch/README.md b/watch/README.md index 18bf393946..34519e52e5 100644 --- a/watch/README.md +++ b/watch/README.md @@ -12,7 +12,7 @@ data which is: ### Requirements - `git` - `rust` : https://rustup.rs/ -- `libpg` : https://www.postgresql.org/download/ +- `libpq` : https://www.postgresql.org/download/ - `diesel_cli` : ``` cargo install diesel_cli --no-default-features --features postgres